Diffstat (limited to 'drivers')
49 files changed, 3878 insertions, 2824 deletions
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 7e4acad3f6f9..7c1786df6213 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2659,6 +2659,27 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
 EXPORT_SYMBOL(drm_mode_set_config_internal);
 
 /**
+ * drm_crtc_get_hv_timing - Fetches hdisplay/vdisplay for given mode
+ * @mode: mode to query
+ * @hdisplay: hdisplay value to fill in
+ * @vdisplay: vdisplay value to fill in
+ *
+ * The vdisplay value will be doubled if the specified mode is a stereo mode of
+ * the appropriate layout.
+ */
+void drm_crtc_get_hv_timing(const struct drm_display_mode *mode,
+			    int *hdisplay, int *vdisplay)
+{
+	struct drm_display_mode adjusted;
+
+	drm_mode_copy(&adjusted, mode);
+	drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
+	*hdisplay = adjusted.crtc_hdisplay;
+	*vdisplay = adjusted.crtc_vdisplay;
+}
+EXPORT_SYMBOL(drm_crtc_get_hv_timing);
+
+/**
  * drm_crtc_check_viewport - Checks that a framebuffer is big enough for the
  * CRTC viewport
  * @crtc: CRTC that framebuffer will be displayed on
@@ -2675,16 +2696,7 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
 {
 	int hdisplay, vdisplay;
 
-	hdisplay = mode->hdisplay;
-	vdisplay = mode->vdisplay;
-
-	if (drm_mode_is_stereo(mode)) {
-		struct drm_display_mode adjusted = *mode;
-
-		drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
-		hdisplay = adjusted.crtc_hdisplay;
-		vdisplay = adjusted.crtc_vdisplay;
-	}
+	drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
 
 	if (crtc->invert_dimensions)
 		swap(hdisplay, vdisplay);
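For context, a minimal sketch of how a caller uses the new helper; it mirrors the drm_crtc_check_viewport() hunk above, though the framebuffer bounds check shown here is illustrative and not part of this diff:

	int hdisplay, vdisplay;

	/* Stereo-aware visible size: vdisplay comes back doubled for
	 * frame-packed stereo modes. */
	drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);

	if (hdisplay > fb->width || vdisplay > fb->height)
		return -ENOSPC;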
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 11cc4deca55b..5125aa91e66f 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -739,6 +739,8 @@ EXPORT_SYMBOL(drm_mode_vrefresh);
  * - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
  *   buffers containing two eyes (only adjust the timings when needed, eg. for
  *   "frame packing" or "side by side full").
+ * - The CRTC_NO_DBLSCAN and CRTC_NO_VSCAN flags request that adjustment *not*
+ *   be performed for doublescan and vscan > 1 modes respectively.
  */
 void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
 {
@@ -765,18 +767,22 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
 		}
 	}
 
-	if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
-		p->crtc_vdisplay *= 2;
-		p->crtc_vsync_start *= 2;
-		p->crtc_vsync_end *= 2;
-		p->crtc_vtotal *= 2;
+	if (!(adjust_flags & CRTC_NO_DBLSCAN)) {
+		if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
+			p->crtc_vdisplay *= 2;
+			p->crtc_vsync_start *= 2;
+			p->crtc_vsync_end *= 2;
+			p->crtc_vtotal *= 2;
+		}
 	}
 
-	if (p->vscan > 1) {
-		p->crtc_vdisplay *= p->vscan;
-		p->crtc_vsync_start *= p->vscan;
-		p->crtc_vsync_end *= p->vscan;
-		p->crtc_vtotal *= p->vscan;
+	if (!(adjust_flags & CRTC_NO_VSCAN)) {
+		if (p->vscan > 1) {
+			p->crtc_vdisplay *= p->vscan;
+			p->crtc_vsync_start *= p->vscan;
+			p->crtc_vsync_end *= p->vscan;
+			p->crtc_vtotal *= p->vscan;
+		}
 	}
 
 	if (adjust_flags & CRTC_STEREO_DOUBLE) {
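The drm_crtc.c hunk above passes CRTC_STEREO_DOUBLE_ONLY so that only the stereo doubling is applied. The header side of the change is not in this excerpt, but a definition consistent with that usage would be (assumed, for illustration):

	/* Apply only the stereo doubling: suppress dblscan/vscan scaling. */
	#define CRTC_STEREO_DOUBLE_ONLY	(CRTC_STEREO_DOUBLE | CRTC_NO_DBLSCAN | CRTC_NO_VSCAN)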
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e4083e41a600..1849ffae61ae 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -19,6 +19,7 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
 
 # GEM code
 i915-y += i915_cmd_parser.o \
+	  i915_gem_batch_pool.o \
	  i915_gem_context.o \
	  i915_gem_render_state.o \
	  i915_gem_debug.o \
@@ -47,6 +48,7 @@ i915-y += intel_renderstate_gen6.o \
 i915-y += intel_audio.o \
	  intel_bios.o \
	  intel_display.o \
+	  intel_fbc.o \
	  intel_fifo_underrun.o \
	  intel_frontbuffer.o \
	  intel_modes.o \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 22c992a78ac6..806e812340d0 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -152,6 +152,7 @@ static const struct drm_i915_cmd_descriptor render_cmds[] = {
 	CMD( MI_PREDICATE, SMI, F, 1, S ),
 	CMD( MI_TOPOLOGY_FILTER, SMI, F, 1, S ),
 	CMD( MI_DISPLAY_FLIP, SMI, !F, 0xFF, R ),
+	CMD( MI_SET_APPID, SMI, F, 1, S ),
 	CMD( MI_SET_CONTEXT, SMI, !F, 0xFF, R ),
 	CMD( MI_URB_CLEAR, SMI, !F, 0xFF, S ),
 	CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3F, B,
@@ -210,6 +211,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
 	CMD( MI_SET_PREDICATE, SMI, F, 1, S ),
 	CMD( MI_RS_CONTROL, SMI, F, 1, S ),
 	CMD( MI_URB_ATOMIC_ALLOC, SMI, F, 1, S ),
+	CMD( MI_SET_APPID, SMI, F, 1, S ),
 	CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
 	CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
 	CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
@@ -229,6 +231,7 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
 
 static const struct drm_i915_cmd_descriptor video_cmds[] = {
 	CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+	CMD( MI_SET_APPID, SMI, F, 1, S ),
 	CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
	      .bits = {{
			.offset = 0,
@@ -272,6 +275,7 @@ static const struct drm_i915_cmd_descriptor video_cmds[] = {
 
 static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
 	CMD( MI_ARB_ON_OFF, SMI, F, 1, R ),
+	CMD( MI_SET_APPID, SMI, F, 1, S ),
 	CMD( MI_STORE_DWORD_IMM, SMI, !F, 0xFF, B,
	      .bits = {{
			.offset = 0,
@@ -401,6 +405,7 @@ static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
 #define REG64(addr) (addr), (addr + sizeof(u32))
 
 static const u32 gen7_render_regs[] = {
+	REG64(GPGPU_THREADS_DISPATCHED),
 	REG64(HS_INVOCATION_COUNT),
 	REG64(DS_INVOCATION_COUNT),
 	REG64(IA_VERTICES_COUNT),
@@ -481,13 +486,17 @@ static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
 	u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
 	u32 subclient =
 		(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
+	u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;
 
 	if (client == INSTR_MI_CLIENT)
 		return 0x3F;
 	else if (client == INSTR_RC_CLIENT) {
-		if (subclient == INSTR_MEDIA_SUBCLIENT)
-			return 0xFFF;
-		else
+		if (subclient == INSTR_MEDIA_SUBCLIENT) {
+			if (op == 6)
+				return 0xFFFF;
+			else
+				return 0xFFF;
+		} else
 			return 0xFF;
 	}
 
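To illustrate what the returned mask is for: when no descriptor overrides the length, the parser pulls a command's DWord-length field out of the header with it, roughly as below (sketch; assumes the file's existing LENGTH_BIAS default-length handling):

	u32 mask = gen7_bsd_get_cmd_length_mask(cmd_header);
	/* With the change above, media-subclient op == 6 commands get a
	 * 16-bit length mask (0xFFFF) instead of 12 bits. */
	u32 length_dwords = (cmd_header & mask) + LENGTH_BIAS;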
@@ -716,13 +725,13 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
 	BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
 	BUG_ON(!validate_regs_sorted(ring));
 
-	if (hash_empty(ring->cmd_hash)) {
-		ret = init_hash_table(ring, cmd_tables, cmd_table_count);
-		if (ret) {
-			DRM_ERROR("CMD: cmd_parser_init failed!\n");
-			fini_hash_table(ring);
-			return ret;
-		}
+	WARN_ON(!hash_empty(ring->cmd_hash));
+
+	ret = init_hash_table(ring, cmd_tables, cmd_table_count);
+	if (ret) {
+		DRM_ERROR("CMD: cmd_parser_init failed!\n");
+		fini_hash_table(ring);
+		return ret;
 	}
 
 	ring->needs_cmd_parser = true;
@@ -840,6 +849,69 @@ finish:
 	return (u32*)addr;
 }
 
+/* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
+static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
+		       struct drm_i915_gem_object *src_obj,
+		       u32 batch_start_offset,
+		       u32 batch_len)
+{
+	int ret = 0;
+	int needs_clflush = 0;
+	u32 *src_base, *dest_base = NULL;
+	u32 *src_addr, *dest_addr;
+	u32 offset = batch_start_offset / sizeof(*dest_addr);
+	u32 end = batch_start_offset + batch_len;
+
+	if (end > dest_obj->base.size || end > src_obj->base.size)
+		return ERR_PTR(-E2BIG);
+
+	ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
+	if (ret) {
+		DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
+		return ERR_PTR(ret);
+	}
+
+	src_base = vmap_batch(src_obj);
+	if (!src_base) {
+		DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
+		ret = -ENOMEM;
+		goto unpin_src;
+	}
+
+	src_addr = src_base + offset;
+
+	if (needs_clflush)
+		drm_clflush_virt_range((char *)src_addr, batch_len);
+
+	ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
+	if (ret) {
+		DRM_DEBUG_DRIVER("CMD: Failed to set batch CPU domain\n");
+		goto unmap_src;
+	}
+
+	dest_base = vmap_batch(dest_obj);
+	if (!dest_base) {
+		DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
+		ret = -ENOMEM;
+		goto unmap_src;
+	}
+
+	dest_addr = dest_base + offset;
+
+	if (batch_start_offset != 0)
+		memset((u8 *)dest_base, 0, batch_start_offset);
+
+	memcpy(dest_addr, src_addr, batch_len);
+	memset((u8 *)dest_addr + batch_len, 0, dest_obj->base.size - end);
+
+unmap_src:
+	vunmap(src_base);
+unpin_src:
+	i915_gem_object_unpin_pages(src_obj);
+
+	return ret ? ERR_PTR(ret) : dest_base;
+}
+
 /**
  * i915_needs_cmd_parser() - should a given ring use software command parsing?
  * @ring: the ring in question
@@ -956,7 +1028,9 @@ static bool check_cmd(const struct intel_engine_cs *ring,
  * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
  * @ring: the ring on which the batch is to execute
  * @batch_obj: the batch buffer in question
+ * @shadow_batch_obj: copy of the batch buffer in question
  * @batch_start_offset: byte offset in the batch at which execution starts
+ * @batch_len: length of the commands in batch_obj
  * @is_master: is the submitting process the drm master?
 *
 * Parses the specified batch buffer looking for privilege violations as
@@ -967,33 +1041,38 @@ static bool check_cmd(const struct intel_engine_cs *ring,
 */
 int i915_parse_cmds(struct intel_engine_cs *ring,
		    struct drm_i915_gem_object *batch_obj,
+		    struct drm_i915_gem_object *shadow_batch_obj,
		    u32 batch_start_offset,
+		    u32 batch_len,
		    bool is_master)
 {
	int ret = 0;
	u32 *cmd, *batch_base, *batch_end;
	struct drm_i915_cmd_descriptor default_desc = { 0 };
-	int needs_clflush = 0;
	bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
 
-	ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
+	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
	if (ret) {
-		DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
-		return ret;
+		DRM_DEBUG_DRIVER("CMD: Failed to pin shadow batch\n");
+		return -1;
	}
 
-	batch_base = vmap_batch(batch_obj);
-	if (!batch_base) {
-		DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
-		i915_gem_object_unpin_pages(batch_obj);
-		return -ENOMEM;
+	batch_base = copy_batch(shadow_batch_obj, batch_obj,
+				batch_start_offset, batch_len);
+	if (IS_ERR(batch_base)) {
+		DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
+		i915_gem_object_ggtt_unpin(shadow_batch_obj);
+		return PTR_ERR(batch_base);
	}
 
-	if (needs_clflush)
-		drm_clflush_virt_range((char *)batch_base, batch_obj->base.size);
-
	cmd = batch_base + (batch_start_offset / sizeof(*cmd));
-	batch_end = cmd + (batch_obj->base.size / sizeof(*batch_end));
+
+	/*
+	 * We use the batch length as size because the shadow object is as
+	 * large or larger and copy_batch() will write MI_NOPs to the extra
+	 * space. Parsing should be faster in some cases this way.
+	 */
+	batch_end = cmd + (batch_len / sizeof(*batch_end));
 
	while (cmd < batch_end) {
		const struct drm_i915_cmd_descriptor *desc;
@@ -1053,8 +1132,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
	}
 
	vunmap(batch_base);
-
-	i915_gem_object_unpin_pages(batch_obj);
+	i915_gem_object_ggtt_unpin(shadow_batch_obj);
 
	return ret;
 }
@@ -1076,6 +1154,7 @@ int i915_cmd_parser_get_version(void)
	 *    hardware parsing enabled (so does not allow new use cases).
	 * 2. Allow access to the MI_PREDICATE_SRC0 and
	 *    MI_PREDICATE_SRC1 registers.
+	 * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
	 */
-	return 2;
+	return 3;
 }
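Userspace can key off the bumped version number before relying on the newly whitelisted register; a sketch using libdrm (assumes I915_PARAM_CMD_PARSER_VERSION from i915_drm.h and a valid drm fd):

	drm_i915_getparam_t gp;
	int version = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = I915_PARAM_CMD_PARSER_VERSION;
	gp.value = &version;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && version >= 3)
		/* GPGPU_THREADS_DISPATCHED may be read from batches */;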
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 779a275eb1fd..e515aad47858 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -96,9 +96,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
 
 static const char *get_pin_flag(struct drm_i915_gem_object *obj)
 {
-	if (obj->user_pin_count > 0)
-		return "P";
-	else if (i915_gem_obj_is_pinned(obj))
+	if (i915_gem_obj_is_pinned(obj))
		return "p";
	else
		return " ";
@@ -133,9 +131,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
		   obj->base.size / 1024,
		   obj->base.read_domains,
		   obj->base.write_domain,
-		   obj->last_read_seqno,
-		   obj->last_write_seqno,
-		   obj->last_fenced_seqno,
+		   i915_gem_request_get_seqno(obj->last_read_req),
+		   i915_gem_request_get_seqno(obj->last_write_req),
+		   i915_gem_request_get_seqno(obj->last_fenced_req),
		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
@@ -154,8 +152,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
			seq_puts(m, " (pp");
		else
			seq_puts(m, " (g");
-		seq_printf(m, "gtt offset: %08lx, size: %08lx)",
-			   vma->node.start, vma->node.size);
+		seq_printf(m, "gtt offset: %08lx, size: %08lx, type: %u)",
+			   vma->node.start, vma->node.size,
+			   vma->ggtt_view.type);
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
@@ -168,8 +167,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
-	if (obj->ring != NULL)
-		seq_printf(m, " (%s)", obj->ring->name);
+	if (obj->last_read_req != NULL)
+		seq_printf(m, " (%s)",
+			   i915_gem_request_get_ring(obj->last_read_req)->name);
	if (obj->frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
 }
@@ -336,7 +336,7 @@ static int per_file_stats(int id, void *ptr, void *data)
			if (ppgtt->file_priv != stats->file_priv)
				continue;
 
-			if (obj->ring) /* XXX per-vma statistic */
+			if (obj->active) /* XXX per-vma statistic */
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;
@@ -346,7 +346,7 @@ static int per_file_stats(int id, void *ptr, void *data)
	} else {
		if (i915_gem_obj_ggtt_bound(obj)) {
			stats->global += obj->base.size;
-			if (obj->ring)
+			if (obj->active)
				stats->active += obj->base.size;
			else
				stats->inactive += obj->base.size;
@@ -360,6 +360,33 @@ static int per_file_stats(int id, void *ptr, void *data)
	return 0;
 }
 
+#define print_file_stats(m, name, stats) \
+	seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n", \
+		   name, \
+		   stats.count, \
+		   stats.total, \
+		   stats.active, \
+		   stats.inactive, \
+		   stats.global, \
+		   stats.shared, \
+		   stats.unbound)
+
+static void print_batch_pool_stats(struct seq_file *m,
+				   struct drm_i915_private *dev_priv)
+{
+	struct drm_i915_gem_object *obj;
+	struct file_stats stats;
+
+	memset(&stats, 0, sizeof(stats));
+
+	list_for_each_entry(obj,
+			    &dev_priv->mm.batch_pool.cache_list,
+			    batch_pool_list)
+		per_file_stats(0, obj, &stats);
+
+	print_file_stats(m, "batch pool", stats);
+}
+
 #define count_vmas(list, member) do { \
	list_for_each_entry(vma, list, member) { \
		size += i915_gem_obj_ggtt_size(vma->obj); \
@@ -442,6 +469,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
		   dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
 
	seq_putc(m, '\n');
+	print_batch_pool_stats(m, dev_priv);
+
+	seq_putc(m, '\n');
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct task_struct *task;
@@ -459,15 +489,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
-		seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu global, %zu shared, %zu unbound)\n",
-			   task ? task->comm : "<unknown>",
-			   stats.count,
-			   stats.total,
-			   stats.active,
-			   stats.inactive,
-			   stats.global,
-			   stats.shared,
-			   stats.unbound);
+		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();
	}
 
@@ -543,14 +565,16 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
			seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
				   pipe, plane);
		}
-		if (work->flip_queued_ring) {
+		if (work->flip_queued_req) {
+			struct intel_engine_cs *ring =
+				i915_gem_request_get_ring(work->flip_queued_req);
+
			seq_printf(m, "Flip queued on %s at seqno %u, next seqno %u [current breadcrumb %u], completed? %d\n",
-				   work->flip_queued_ring->name,
-				   work->flip_queued_seqno,
+				   ring->name,
+				   i915_gem_request_get_seqno(work->flip_queued_req),
				   dev_priv->next_seqno,
-				   work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
-				   i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
-						     work->flip_queued_seqno));
+				   ring->get_seqno(ring, true),
+				   i915_gem_request_completed(work->flip_queued_req, true));
		} else
			seq_printf(m, "Flip not associated with any ring\n");
		seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
@@ -582,6 +606,36 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
	return 0;
 }
 
+static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	int count = 0;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_puts(m, "cache:\n");
+	list_for_each_entry(obj,
+			    &dev_priv->mm.batch_pool.cache_list,
+			    batch_pool_list) {
+		seq_puts(m, " ");
+		describe_obj(m, obj);
+		seq_putc(m, '\n');
+		count++;
+	}
+
+	seq_printf(m, "total: %d\n", count);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
 static int i915_gem_request_info(struct seq_file *m, void *data)
 {
	struct drm_info_node *node = m->private;
@@ -2155,6 +2209,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 psrperf = 0;
+	u32 stat[3];
+	enum pipe pipe;
	bool enabled = false;
 
	intel_runtime_pm_get(dev_priv);
@@ -2169,14 +2225,36 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
	seq_printf(m, "Re-enable work scheduled: %s\n",
		   yesno(work_busy(&dev_priv->psr.work.work)));
 
-	enabled = HAS_PSR(dev) &&
-		I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
-	seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
+	if (HAS_PSR(dev)) {
+		if (HAS_DDI(dev))
+			enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+		else {
+			for_each_pipe(dev_priv, pipe) {
+				stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
+					VLV_EDP_PSR_CURR_STATE_MASK;
+				if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+				    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
+					enabled = true;
+			}
+		}
+	}
+	seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
+
+	if (!HAS_DDI(dev))
+		for_each_pipe(dev_priv, pipe) {
+			if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
+			    (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
+				seq_printf(m, " pipe %c", pipe_name(pipe));
+		}
+	seq_puts(m, "\n");
 
-	if (HAS_PSR(dev))
+	/* CHV PSR has no kind of performance counter */
+	if (HAS_PSR(dev) && HAS_DDI(dev)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
			EDP_PSR_PERF_CNT_MASK;
-	seq_printf(m, "Performance_Counter: %u\n", psrperf);
+
+		seq_printf(m, "Performance_Counter: %u\n", psrperf);
+	}
	mutex_unlock(&dev_priv->psr.lock);
 
	intel_runtime_pm_put(dev_priv);
@@ -2322,7 +2400,7 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
	case POWER_DOMAIN_INIT:
		return "INIT";
	default:
-		WARN_ON(1);
+		MISSING_CASE(domain);
		return "?";
	}
 }
@@ -2718,6 +2796,9 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
	enum pipe pipe;
	int plane;
 
+	if (INTEL_INFO(dev)->gen < 9)
+		return 0;
+
	drm_modeset_lock_all(dev);
 
	ddb = &dev_priv->wm.skl_hw.ddb;
@@ -2830,7 +2911,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
	char buf[PIPE_CRC_BUFFER_LEN];
-	int head, tail, n_entries, n;
+	int n_entries;
	ssize_t bytes_read;
 
	/*
@@ -2862,36 +2943,39 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
	}
 
	/* We now have one or more entries to read */
-	head = pipe_crc->head;
-	tail = pipe_crc->tail;
-	n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
-			count / PIPE_CRC_LINE_LEN);
-	spin_unlock_irq(&pipe_crc->lock);
+	n_entries = count / PIPE_CRC_LINE_LEN;
 
	bytes_read = 0;
-	n = 0;
-	do {
-		struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
+	while (n_entries > 0) {
+		struct intel_pipe_crc_entry *entry =
+			&pipe_crc->entries[pipe_crc->tail];
		int ret;
 
+		if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
+			     INTEL_PIPE_CRC_ENTRIES_NR) < 1)
+			break;
+
+		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
+		pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+
		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
				       "%8u %8x %8x %8x %8x %8x\n",
				       entry->frame, entry->crc[0],
				       entry->crc[1], entry->crc[2],
				       entry->crc[3], entry->crc[4]);
 
-		ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
-				   buf, PIPE_CRC_LINE_LEN);
+		spin_unlock_irq(&pipe_crc->lock);
+
+		ret = copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN);
		if (ret == PIPE_CRC_LINE_LEN)
			return -EFAULT;
 
-		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
-		tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
-		n++;
-	} while (--n_entries);
+		user_buf += PIPE_CRC_LINE_LEN;
+		n_entries--;
+
+		spin_lock_irq(&pipe_crc->lock);
+	}
 
-	spin_lock_irq(&pipe_crc->lock);
-	pipe_crc->tail = tail;
	spin_unlock_irq(&pipe_crc->lock);
 
	return bytes_read;
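The rework above re-checks ring occupancy under the lock on every iteration because copy_to_user() can sleep, so the lock has to be dropped around it. For reference, CIRC_CNT() from <linux/circ_buf.h> reduces to a power-of-two ring occupancy count:

	/* Filled slots between producer head and consumer tail; size must
	 * be a power of two, hence the BUILD_BUG_ON in the loop above. */
	#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))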
@@ -3072,6 +3156,12 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
		need_stable_symbols = true;
		break;
+	case INTEL_PIPE_CRC_SOURCE_DP_D:
+		if (!IS_CHERRYVIEW(dev))
+			return -EINVAL;
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
+		need_stable_symbols = true;
+		break;
	case INTEL_PIPE_CRC_SOURCE_NONE:
		*val = 0;
		break;
@@ -3092,11 +3182,19 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
		uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
		tmp |= DC_BALANCE_RESET_VLV;
-		if (pipe == PIPE_A)
+		switch (pipe) {
+		case PIPE_A:
			tmp |= PIPE_A_SCRAMBLE_RESET;
-		else
+			break;
+		case PIPE_B:
			tmp |= PIPE_B_SCRAMBLE_RESET;
-
+			break;
+		case PIPE_C:
+			tmp |= PIPE_C_SCRAMBLE_RESET;
+			break;
+		default:
+			return -EINVAL;
+		}
		I915_WRITE(PORT_DFT2_G4X, tmp);
	}
 
@@ -3185,10 +3283,19 @@ static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp = I915_READ(PORT_DFT2_G4X);
 
-	if (pipe == PIPE_A)
+	switch (pipe) {
+	case PIPE_A:
		tmp &= ~PIPE_A_SCRAMBLE_RESET;
-	else
+		break;
+	case PIPE_B:
		tmp &= ~PIPE_B_SCRAMBLE_RESET;
+		break;
+	case PIPE_C:
+		tmp &= ~PIPE_C_SCRAMBLE_RESET;
+		break;
+	default:
+		return;
+	}
	if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
		tmp &= ~DC_BALANCE_RESET_VLV;
	I915_WRITE(PORT_DFT2_G4X, tmp);
@@ -3359,13 +3466,15 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 
	/* none -> real source transition */
	if (source) {
+		struct intel_pipe_crc_entry *entries;
+
		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
				 pipe_name(pipe), pipe_crc_source_name(source));
 
-		pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
-					    INTEL_PIPE_CRC_ENTRIES_NR,
-					    GFP_KERNEL);
-		if (!pipe_crc->entries)
+		entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
+				  sizeof(pipe_crc->entries[0]),
+				  GFP_KERNEL);
+		if (!entries)
			return -ENOMEM;
 
		/*
@@ -3377,6 +3486,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
			hsw_disable_ips(crtc);
 
		spin_lock_irq(&pipe_crc->lock);
+		kfree(pipe_crc->entries);
+		pipe_crc->entries = entries;
		pipe_crc->head = 0;
		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);
@@ -3404,6 +3515,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
		spin_lock_irq(&pipe_crc->lock);
		entries = pipe_crc->entries;
		pipe_crc->entries = NULL;
+		pipe_crc->head = 0;
+		pipe_crc->tail = 0;
		spin_unlock_irq(&pipe_crc->lock);
 
		kfree(entries);
@@ -4296,6 +4409,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
	{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
+	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ecee3bcc8772..52730ed86385 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -928,6 +928,7 @@ int i915_driver_unload(struct drm_device *dev)
 
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
+	i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	i915_gem_cleanup_stolen(dev);
@@ -1004,6 +1005,13 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
	kfree(file_priv);
 }
 
+static int
+i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file)
+{
+	return -ENODEV;
+}
+
 const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
@@ -1025,8 +1033,8 @@ const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
-	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 574057cd1d09..0763fa0791e3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -841,6 +841,8 @@ int i915_reset(struct drm_device *dev)
		return ret;
	}
 
+	intel_overlay_reset(dev_priv);
+
	/* Ok, now get things going again... */
 
	/*
@@ -1299,7 +1301,9 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;
-	vlv_save_gunit_s0ix_state(dev_priv);
+
+	if (!IS_CHERRYVIEW(dev_priv->dev))
+		vlv_save_gunit_s0ix_state(dev_priv);
 
	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
@@ -1330,7 +1334,8 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);
 
-	vlv_restore_gunit_s0ix_state(dev_priv);
+	if (!IS_CHERRYVIEW(dev_priv->dev))
+		vlv_restore_gunit_s0ix_state(dev_priv);
 
	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 70d0f0f06f1a..fd7a493df0de 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -55,10 +55,51 @@
 
 #define DRIVER_NAME "i915"
 #define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20141121"
+#define DRIVER_DATE "20141219"
 
 #undef WARN_ON
-#define WARN_ON(x) WARN(x, "WARN_ON(" #x ")")
+/* Many gcc seem to no see through this and fall over :( */
+#if 0
+#define WARN_ON(x) ({ \
+	bool __i915_warn_cond = (x); \
+	if (__builtin_constant_p(__i915_warn_cond)) \
+		BUILD_BUG_ON(__i915_warn_cond); \
+	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
+#else
+#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
+#endif
+
+#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
+			     (long) (x), __func__)
+
+/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
+ * WARN_ON()) for hw state sanity checks to check for unexpected conditions
+ * which may not necessarily be a user visible problem. This will either
+ * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
+ * enable distros and users to tailor their preferred amount of i915 abrt
+ * spam.
+ */
+#define I915_STATE_WARN(condition, format...) ({ \
+	int __ret_warn_on = !!(condition); \
+	if (unlikely(__ret_warn_on)) { \
+		if (i915.verbose_state_checks) \
+			__WARN_printf(format); \
+		else \
+			DRM_ERROR(format); \
+	} \
+	unlikely(__ret_warn_on); \
+})
+
+#define I915_STATE_WARN_ON(condition) ({ \
+	int __ret_warn_on = !!(condition); \
+	if (unlikely(__ret_warn_on)) { \
+		if (i915.verbose_state_checks) \
+			__WARN_printf("WARN_ON(" #condition ")\n"); \
+		else \
+			DRM_ERROR("WARN_ON(" #condition ")\n"); \
+	} \
+	unlikely(__ret_warn_on); \
+})
 
 enum pipe {
	INVALID_PIPE = -1,
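As a usage sketch for the new state-check macro (hypothetical call site; the hardware-state readout code is not in this excerpt):

	/* Reports via WARN() or DRM_ERROR() depending on the
	 * i915.verbose_state_checks module parameter. */
	I915_STATE_WARN(crtc->active != enabled,
			"crtc active state doesn't match (expected %d)\n",
			enabled);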
@@ -1130,6 +1171,11 @@ struct intel_l3_parity {
	int which_slice;
 };
 
+struct i915_gem_batch_pool {
+	struct drm_device *dev;
+	struct list_head cache_list;
+};
+
 struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
@@ -1143,6 +1189,13 @@ struct i915_gem_mm {
	 */
	struct list_head unbound_list;
 
+	/*
+	 * A pool of objects to use as shadow copies of client batch buffers
+	 * when the command parser is enabled. Prevents the client from
+	 * modifying the batch contents after software parsing.
+	 */
+	struct i915_gem_batch_pool batch_pool;
+
	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */
 
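The pool implementation lives in the new i915_gem_batch_pool.o added to the Makefile above and is not part of this excerpt; the interface implied by the callers visible here (e.g. i915_gem_batch_pool_fini() in i915_dma.c) is roughly this, with the init signature assumed:

	void i915_gem_batch_pool_init(struct drm_device *dev,
				      struct i915_gem_batch_pool *pool);
	void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
	struct drm_i915_gem_object *
	i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);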
@@ -1307,6 +1360,13 @@ enum drrs_support_type {
	SEAMLESS_DRRS_SUPPORT = 2
 };
 
+enum psr_lines_to_wait {
+	PSR_0_LINES_TO_WAIT = 0,
+	PSR_1_LINE_TO_WAIT,
+	PSR_4_LINES_TO_WAIT,
+	PSR_8_LINES_TO_WAIT
+};
+
 struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -1336,10 +1396,20 @@ struct intel_vbt_data {
	struct edp_power_seq edp_pps;
 
	struct {
+		bool full_link;
+		bool require_aux_wakeup;
+		int idle_frames;
+		enum psr_lines_to_wait lines_to_wait;
+		int tp1_wakeup_time;
+		int tp2_tp3_wakeup_time;
+	} psr;
+
+	struct {
		u16 pwm_freq_hz;
		bool present;
		bool active_low_pwm;
		u8 min_brightness; /* min_brightness/255 of max */
+		u8 controller; /* brightness controller number */
	} backlight;
 
	/* MIPI DSI */
@@ -1772,6 +1842,8 @@ struct drm_i915_private {
		void (*stop_ring)(struct intel_engine_cs *ring);
	} gt;
 
+	uint32_t request_uniq;
+
	/*
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
	 * will be rejected. Instead look for a better place.
@@ -1855,6 +1927,8 @@ struct drm_i915_gem_object {
	/** Used in execbuf to temporarily hold a ref */
	struct list_head obj_exec_link;
 
+	struct list_head batch_pool_list;
+
	/**
	 * This is set if the object is on the active lists (has pending
	 * rendering and so a non-zero seqno), and is not set if it is on
@@ -1926,13 +2000,11 @@ struct drm_i915_gem_object {
	void *dma_buf_vmapping;
	int vmapping_count;
 
-	struct intel_engine_cs *ring;
-
	/** Breadcrumb of last rendering to the buffer. */
-	uint32_t last_read_seqno;
-	uint32_t last_write_seqno;
+	struct drm_i915_gem_request *last_read_req;
+	struct drm_i915_gem_request *last_write_req;
	/** Breadcrumb of last fenced GPU access to the buffer. */
-	uint32_t last_fenced_seqno;
+	struct drm_i915_gem_request *last_fenced_req;
 
	/** Current tiling stride for the object, if it's tiled. */
	uint32_t stride;
@@ -1943,10 +2015,6 @@ struct drm_i915_gem_object {
	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;
 
-	/** User space pin count and filp owning the pin */
-	unsigned long user_pin_count;
-	struct drm_file *pin_filp;
-
	union {
		/** for phy allocated objects */
		struct drm_dma_handle *phys_handle;
@@ -1975,11 +2043,14 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
- * By keeping this list, we can avoid having to do questionable
- * sequence-number comparisons on buffer last_rendering_seqnos, and associate
- * an emission time with seqnos for tracking how far ahead of the GPU we are.
+ * By keeping this list, we can avoid having to do questionable sequence
+ * number comparisons on buffer last_read|write_seqno. It also allows an
+ * emission time to be associated with the request for tracking how far ahead
+ * of the GPU the submission is.
 */
 struct drm_i915_gem_request {
+	struct kref ref;
+
	/** On Which ring this request was generated */
	struct intel_engine_cs *ring;
 
@@ -2007,8 +2078,55 @@ struct drm_i915_gem_request {
	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;
+
+	uint32_t uniq;
 };
 
+void i915_gem_request_free(struct kref *req_ref);
+
+static inline uint32_t
+i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
+{
+	return req ? req->seqno : 0;
+}
+
+static inline struct intel_engine_cs *
+i915_gem_request_get_ring(struct drm_i915_gem_request *req)
+{
+	return req ? req->ring : NULL;
+}
+
+static inline void
+i915_gem_request_reference(struct drm_i915_gem_request *req)
+{
+	kref_get(&req->ref);
+}
+
+static inline void
+i915_gem_request_unreference(struct drm_i915_gem_request *req)
+{
+	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
+	kref_put(&req->ref, i915_gem_request_free);
+}
+
+static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
+					   struct drm_i915_gem_request *src)
+{
+	if (src)
+		i915_gem_request_reference(src);
+
+	if (*pdst)
+		i915_gem_request_unreference(*pdst);
+
+	*pdst = src;
+}
+
+/*
+ * XXX: i915_gem_request_completed should be here but currently needs the
+ * definition of i915_seqno_passed() which is below. It will be moved in
+ * a later patch when the call to i915_seqno_passed() is obsoleted...
+ */
+
 struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;
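A usage sketch for the new reference-counted tracking (hypothetical call sites): instead of copying a raw seqno, callers hand ownership through the assign helper, which references the new request and unreferences whatever was tracked before:

	/* e.g. when a ring finishes writing to an object */
	i915_gem_request_assign(&obj->last_write_req, req);

	/* and when the tracking is dropped */
	i915_gem_request_assign(&obj->last_write_req, NULL);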
@@ -2242,7 +2360,8 @@ struct drm_i915_cmd_table { | |||
2242 | 2360 | ||
2243 | #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) | 2361 | #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) |
2244 | #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) | 2362 | #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) |
2245 | #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) | 2363 | #define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \ |
2364 | IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) | ||
2246 | #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ | 2365 | #define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ |
2247 | IS_BROADWELL(dev) || IS_VALLEYVIEW(dev)) | 2366 | IS_BROADWELL(dev) || IS_VALLEYVIEW(dev)) |
2248 | #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) | 2367 | #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) |
@@ -2312,6 +2431,7 @@ struct i915_params { | |||
2312 | bool disable_vtd_wa; | 2431 | bool disable_vtd_wa; |
2313 | int use_mmio_flip; | 2432 | int use_mmio_flip; |
2314 | bool mmio_debug; | 2433 | bool mmio_debug; |
2434 | bool verbose_state_checks; | ||
2315 | }; | 2435 | }; |
2316 | extern struct i915_params i915 __read_mostly; | 2436 | extern struct i915_params i915 __read_mostly; |
2317 | 2437 | ||
@@ -2412,10 +2532,6 @@ int i915_gem_execbuffer(struct drm_device *dev, void *data, | |||
2412 | struct drm_file *file_priv); | 2532 | struct drm_file *file_priv); |
2413 | int i915_gem_execbuffer2(struct drm_device *dev, void *data, | 2533 | int i915_gem_execbuffer2(struct drm_device *dev, void *data, |
2414 | struct drm_file *file_priv); | 2534 | struct drm_file *file_priv); |
2415 | int i915_gem_pin_ioctl(struct drm_device *dev, void *data, | ||
2416 | struct drm_file *file_priv); | ||
2417 | int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | ||
2418 | struct drm_file *file_priv); | ||
2419 | int i915_gem_busy_ioctl(struct drm_device *dev, void *data, | 2535 | int i915_gem_busy_ioctl(struct drm_device *dev, void *data, |
2420 | struct drm_file *file_priv); | 2536 | struct drm_file *file_priv); |
2421 | int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, | 2537 | int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, |
@@ -2460,10 +2576,23 @@ void i915_gem_vma_destroy(struct i915_vma *vma); | |||
2460 | #define PIN_GLOBAL 0x4 | 2576 | #define PIN_GLOBAL 0x4 |
2461 | #define PIN_OFFSET_BIAS 0x8 | 2577 | #define PIN_OFFSET_BIAS 0x8 |
2462 | #define PIN_OFFSET_MASK (~4095) | 2578 | #define PIN_OFFSET_MASK (~4095) |
2579 | int __must_check i915_gem_object_pin_view(struct drm_i915_gem_object *obj, | ||
2580 | struct i915_address_space *vm, | ||
2581 | uint32_t alignment, | ||
2582 | uint64_t flags, | ||
2583 | const struct i915_ggtt_view *view); | ||
2584 | static inline | ||
2463 | int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, | 2585 | int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, |
2464 | struct i915_address_space *vm, | 2586 | struct i915_address_space *vm, |
2465 | uint32_t alignment, | 2587 | uint32_t alignment, |
2466 | uint64_t flags); | 2588 | uint64_t flags) |
2589 | { | ||
2590 | return i915_gem_object_pin_view(obj, vm, alignment, flags, | ||
2591 | &i915_ggtt_view_normal); | ||
2592 | } | ||
2593 | |||
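The pin_view/pin pair above establishes the pattern this patch applies throughout the header: the _view variant takes an explicit struct i915_ggtt_view, and the legacy name becomes a static inline that forwards the global normal view. The same shape recurs below for i915_gem_obj_offset, i915_gem_obj_bound, i915_gem_obj_to_vma and i915_gem_obj_lookup_or_create_vma. A sketch of the equivalence (the alignment and flags values are arbitrary illustrations):

	/* These two calls are identical under the new scheme. */
	ret = i915_gem_object_pin(obj, vm, 4096, PIN_GLOBAL);
	ret = i915_gem_object_pin_view(obj, vm, 4096, PIN_GLOBAL,
				       &i915_ggtt_view_normal);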
2594 | int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, | ||
2595 | u32 flags); | ||
2467 | int __must_check i915_vma_unbind(struct i915_vma *vma); | 2596 | int __must_check i915_vma_unbind(struct i915_vma *vma); |
2468 | int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); | 2597 | int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); |
2469 | void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); | 2598 | void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); |
@@ -2512,6 +2641,18 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) | |||
2512 | return (int32_t)(seq1 - seq2) >= 0; | 2641 | return (int32_t)(seq1 - seq2) >= 0; |
2513 | } | 2642 | } |
2514 | 2643 | ||
2644 | static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, | ||
2645 | bool lazy_coherency) | ||
2646 | { | ||
2647 | u32 seqno; | ||
2648 | |||
2649 | BUG_ON(req == NULL); | ||
2650 | |||
2651 | seqno = req->ring->get_seqno(req->ring, lazy_coherency); | ||
2652 | |||
2653 | return i915_seqno_passed(seqno, req->seqno); | ||
2654 | } | ||
2655 | |||
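i915_gem_request_completed() leans on i915_seqno_passed(), whose signed 32-bit subtraction keeps the ordering test correct across seqno wraparound. A worked example with assumed values:

	u32 hw_seqno = 0x00000002;	/* counter has just wrapped */
	u32 req_seqno = 0xfffffffe;	/* emitted shortly before the wrap */

	/* (int32_t)(0x00000002 - 0xfffffffe) == 4 >= 0, so the
	 * request is reported complete even though hw_seqno is the
	 * smaller unsigned value. */
	WARN_ON(!i915_seqno_passed(hw_seqno, req_seqno));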
2515 | int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); | 2656 | int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); |
2516 | int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); | 2657 | int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); |
2517 | int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); | 2658 | int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); |
@@ -2527,7 +2668,7 @@ bool i915_gem_retire_requests(struct drm_device *dev); | |||
2527 | void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); | 2668 | void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); |
2528 | int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, | 2669 | int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, |
2529 | bool interruptible); | 2670 | bool interruptible); |
2530 | int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno); | 2671 | int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req); |
2531 | 2672 | ||
2532 | static inline bool i915_reset_in_progress(struct i915_gpu_error *error) | 2673 | static inline bool i915_reset_in_progress(struct i915_gpu_error *error) |
2533 | { | 2674 | { |
@@ -2570,17 +2711,15 @@ int __must_check i915_gpu_idle(struct drm_device *dev); | |||
2570 | int __must_check i915_gem_suspend(struct drm_device *dev); | 2711 | int __must_check i915_gem_suspend(struct drm_device *dev); |
2571 | int __i915_add_request(struct intel_engine_cs *ring, | 2712 | int __i915_add_request(struct intel_engine_cs *ring, |
2572 | struct drm_file *file, | 2713 | struct drm_file *file, |
2573 | struct drm_i915_gem_object *batch_obj, | 2714 | struct drm_i915_gem_object *batch_obj); |
2574 | u32 *seqno); | 2715 | #define i915_add_request(ring) \ |
2575 | #define i915_add_request(ring, seqno) \ | 2716 | __i915_add_request(ring, NULL, NULL) |
2576 | __i915_add_request(ring, NULL, NULL, seqno) | 2717 | int __i915_wait_request(struct drm_i915_gem_request *req, |
2577 | int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno, | ||
2578 | unsigned reset_counter, | 2718 | unsigned reset_counter, |
2579 | bool interruptible, | 2719 | bool interruptible, |
2580 | s64 *timeout, | 2720 | s64 *timeout, |
2581 | struct drm_i915_file_private *file_priv); | 2721 | struct drm_i915_file_private *file_priv); |
2582 | int __must_check i915_wait_seqno(struct intel_engine_cs *ring, | 2722 | int __must_check i915_wait_request(struct drm_i915_gem_request *req); |
2583 | uint32_t seqno); | ||
2584 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); | 2723 | int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); |
2585 | int __must_check | 2724 | int __must_check |
2586 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, | 2725 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, |
@@ -2614,18 +2753,51 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev, | |||
2614 | 2753 | ||
2615 | void i915_gem_restore_fences(struct drm_device *dev); | 2754 | void i915_gem_restore_fences(struct drm_device *dev); |
2616 | 2755 | ||
2756 | unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o, | ||
2757 | struct i915_address_space *vm, | ||
2758 | enum i915_ggtt_view_type view); | ||
2759 | static inline | ||
2617 | unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, | 2760 | unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, |
2618 | struct i915_address_space *vm); | 2761 | struct i915_address_space *vm) |
2762 | { | ||
2763 | return i915_gem_obj_offset_view(o, vm, I915_GGTT_VIEW_NORMAL); | ||
2764 | } | ||
2619 | bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o); | 2765 | bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o); |
2766 | bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o, | ||
2767 | struct i915_address_space *vm, | ||
2768 | enum i915_ggtt_view_type view); | ||
2769 | static inline | ||
2620 | bool i915_gem_obj_bound(struct drm_i915_gem_object *o, | 2770 | bool i915_gem_obj_bound(struct drm_i915_gem_object *o, |
2621 | struct i915_address_space *vm); | 2771 | struct i915_address_space *vm) |
2772 | { | ||
2773 | return i915_gem_obj_bound_view(o, vm, I915_GGTT_VIEW_NORMAL); | ||
2774 | } | ||
2775 | |||
2622 | unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, | 2776 | unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, |
2623 | struct i915_address_space *vm); | 2777 | struct i915_address_space *vm); |
2778 | struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj, | ||
2779 | struct i915_address_space *vm, | ||
2780 | const struct i915_ggtt_view *view); | ||
2781 | static inline | ||
2624 | struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, | 2782 | struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, |
2625 | struct i915_address_space *vm); | 2783 | struct i915_address_space *vm) |
2784 | { | ||
2785 | return i915_gem_obj_to_vma_view(obj, vm, &i915_ggtt_view_normal); | ||
2786 | } | ||
2787 | |||
2788 | struct i915_vma * | ||
2789 | i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj, | ||
2790 | struct i915_address_space *vm, | ||
2791 | const struct i915_ggtt_view *view); | ||
2792 | |||
2793 | static inline | ||
2626 | struct i915_vma * | 2794 | struct i915_vma * |
2627 | i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, | 2795 | i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, |
2628 | struct i915_address_space *vm); | 2796 | struct i915_address_space *vm) |
2797 | { | ||
2798 | return i915_gem_obj_lookup_or_create_vma_view(obj, vm, | ||
2799 | &i915_ggtt_view_normal); | ||
2800 | } | ||
2629 | 2801 | ||
2630 | struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj); | 2802 | struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj); |
2631 | static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) { | 2803 | static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) { |
@@ -2807,6 +2979,13 @@ void i915_destroy_error_state(struct drm_device *dev); | |||
2807 | void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); | 2979 | void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); |
2808 | const char *i915_cache_level_str(struct drm_i915_private *i915, int type); | 2980 | const char *i915_cache_level_str(struct drm_i915_private *i915, int type); |
2809 | 2981 | ||
2982 | /* i915_gem_batch_pool.c */ | ||
2983 | void i915_gem_batch_pool_init(struct drm_device *dev, | ||
2984 | struct i915_gem_batch_pool *pool); | ||
2985 | void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool); | ||
2986 | struct drm_i915_gem_object* | ||
2987 | i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size); | ||
2988 | |||
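Only the batch pool prototypes are visible here, so the exact semantics are an assumption: presumably _get returns a (possibly recycled) buffer object of at least `size` bytes for use as a shadow batch, or an ERR_PTR on failure. A hedged usage sketch on that basis:

	struct i915_gem_batch_pool pool;
	struct drm_i915_gem_object *shadow;

	i915_gem_batch_pool_init(dev, &pool);

	shadow = i915_gem_batch_pool_get(&pool, 4096);
	if (!IS_ERR(shadow)) {
		/* e.g. hand to i915_parse_cmds() as the
		 * shadow_batch_obj argument added below */
	}

	i915_gem_batch_pool_fini(&pool);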
2810 | /* i915_cmd_parser.c */ | 2989 | /* i915_cmd_parser.c */ |
2811 | int i915_cmd_parser_get_version(void); | 2990 | int i915_cmd_parser_get_version(void); |
2812 | int i915_cmd_parser_init_ring(struct intel_engine_cs *ring); | 2991 | int i915_cmd_parser_init_ring(struct intel_engine_cs *ring); |
@@ -2814,7 +2993,9 @@ void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring); | |||
2814 | bool i915_needs_cmd_parser(struct intel_engine_cs *ring); | 2993 | bool i915_needs_cmd_parser(struct intel_engine_cs *ring); |
2815 | int i915_parse_cmds(struct intel_engine_cs *ring, | 2994 | int i915_parse_cmds(struct intel_engine_cs *ring, |
2816 | struct drm_i915_gem_object *batch_obj, | 2995 | struct drm_i915_gem_object *batch_obj, |
2996 | struct drm_i915_gem_object *shadow_batch_obj, | ||
2817 | u32 batch_start_offset, | 2997 | u32 batch_start_offset, |
2998 | u32 batch_len, | ||
2818 | bool is_master); | 2999 | bool is_master); |
2819 | 3000 | ||
2820 | /* i915_suspend.c */ | 3001 | /* i915_suspend.c */ |
@@ -2894,9 +3075,6 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev, | |||
2894 | bool force_restore); | 3075 | bool force_restore); |
2895 | extern void i915_redisable_vga(struct drm_device *dev); | 3076 | extern void i915_redisable_vga(struct drm_device *dev); |
2896 | extern void i915_redisable_vga_power_on(struct drm_device *dev); | 3077 | extern void i915_redisable_vga_power_on(struct drm_device *dev); |
2897 | extern bool intel_fbc_enabled(struct drm_device *dev); | ||
2898 | extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value); | ||
2899 | extern void intel_disable_fbc(struct drm_device *dev); | ||
2900 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); | 3078 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
2901 | extern void intel_init_pch_refclk(struct drm_device *dev); | 3079 | extern void intel_init_pch_refclk(struct drm_device *dev); |
2902 | extern void gen6_set_rps(struct drm_device *dev, u8 val); | 3080 | extern void gen6_set_rps(struct drm_device *dev, u8 val); |
@@ -3072,4 +3250,11 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) | |||
3072 | } | 3250 | } |
3073 | } | 3251 | } |
3074 | 3252 | ||
3253 | static inline void i915_trace_irq_get(struct intel_engine_cs *ring, | ||
3254 | struct drm_i915_gem_request *req) | ||
3255 | { | ||
3256 | if (ring->trace_irq_req == NULL && ring->irq_get(ring)) | ||
3257 | i915_gem_request_assign(&ring->trace_irq_req, req); | ||
3258 | } | ||
3259 | |||
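i915_trace_irq_get() is the acquire half of a pair; the release lives in the retire path (see the i915_gem_retire_requests_ring() hunk in i915_gem.c below). Once the traced request completes, the irq is put and the reference dropped by assigning NULL:

	if (unlikely(ring->trace_irq_req &&
		     i915_gem_request_completed(ring->trace_irq_req, true))) {
		ring->irq_put(ring);
		i915_gem_request_assign(&ring->trace_irq_req, NULL);
	}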
3075 | #endif | 3260 | #endif |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 52adcb680be3..3044fb324c8e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1151,19 +1151,18 @@ i915_gem_check_wedge(struct i915_gpu_error *error, | |||
1151 | } | 1151 | } |
1152 | 1152 | ||
1153 | /* | 1153 | /* |
1154 | * Compare seqno against outstanding lazy request. Emit a request if they are | 1154 | * Compare arbitrary request against outstanding lazy request. Emit on match. |
1155 | * equal. | ||
1156 | */ | 1155 | */ |
1157 | int | 1156 | int |
1158 | i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno) | 1157 | i915_gem_check_olr(struct drm_i915_gem_request *req) |
1159 | { | 1158 | { |
1160 | int ret; | 1159 | int ret; |
1161 | 1160 | ||
1162 | BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex)); | 1161 | WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex)); |
1163 | 1162 | ||
1164 | ret = 0; | 1163 | ret = 0; |
1165 | if (seqno == ring->outstanding_lazy_seqno) | 1164 | if (req == req->ring->outstanding_lazy_request) |
1166 | ret = i915_add_request(ring, NULL); | 1165 | ret = i915_add_request(req->ring); |
1167 | 1166 | ||
1168 | return ret; | 1167 | return ret; |
1169 | } | 1168 | } |
@@ -1188,10 +1187,9 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv) | |||
1188 | } | 1187 | } |
1189 | 1188 | ||
1190 | /** | 1189 | /** |
1191 | * __i915_wait_seqno - wait until execution of seqno has finished | 1190 | * __i915_wait_request - wait until execution of request has finished |
1192 | * @ring: the ring expected to report seqno | 1191 | * @req: the request to wait on |
1193 | * @seqno: duh! | 1192 | * @reset_counter: reset sequence associated with the given request |
1194 | * @reset_counter: reset sequence associated with the given seqno | ||
1195 | * @interruptible: do an interruptible wait (normally yes) | 1193 | * @interruptible: do an interruptible wait (normally yes) |
1196 | * @timeout: in - how long to wait (NULL forever); out - how much time remaining | 1194 | * @timeout: in - how long to wait (NULL forever); out - how much time remaining |
1197 | * | 1195 | * |
@@ -1202,15 +1200,16 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv) | |||
1202 | * reset_counter _must_ be read before, and an appropriate smp_rmb must be | 1200 | * reset_counter _must_ be read before, and an appropriate smp_rmb must be |
1203 | * inserted. | 1201 | * inserted. |
1204 | * | 1202 | * |
1205 | * Returns 0 if the seqno was found within the alloted time. Else returns the | 1203 | * Returns 0 if the request was found within the allotted time. Else returns the |
1206 | * errno with remaining time filled in timeout argument. | 1204 | * errno with remaining time filled in timeout argument. |
1207 | */ | 1205 | */ |
1208 | int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno, | 1206 | int __i915_wait_request(struct drm_i915_gem_request *req, |
1209 | unsigned reset_counter, | 1207 | unsigned reset_counter, |
1210 | bool interruptible, | 1208 | bool interruptible, |
1211 | s64 *timeout, | 1209 | s64 *timeout, |
1212 | struct drm_i915_file_private *file_priv) | 1210 | struct drm_i915_file_private *file_priv) |
1213 | { | 1211 | { |
1212 | struct intel_engine_cs *ring = i915_gem_request_get_ring(req); | ||
1214 | struct drm_device *dev = ring->dev; | 1213 | struct drm_device *dev = ring->dev; |
1215 | struct drm_i915_private *dev_priv = dev->dev_private; | 1214 | struct drm_i915_private *dev_priv = dev->dev_private; |
1216 | const bool irq_test_in_progress = | 1215 | const bool irq_test_in_progress = |
@@ -1222,7 +1221,7 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno, | |||
1222 | 1221 | ||
1223 | WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled"); | 1222 | WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled"); |
1224 | 1223 | ||
1225 | if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) | 1224 | if (i915_gem_request_completed(req, true)) |
1226 | return 0; | 1225 | return 0; |
1227 | 1226 | ||
1228 | timeout_expire = timeout ? | 1227 | timeout_expire = timeout ? |
@@ -1240,7 +1239,7 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno, | |||
1240 | return -ENODEV; | 1239 | return -ENODEV; |
1241 | 1240 | ||
1242 | /* Record current time in case interrupted by signal, or wedged */ | 1241 | /* Record current time in case interrupted by signal, or wedged */ |
1243 | trace_i915_gem_request_wait_begin(ring, seqno); | 1242 | trace_i915_gem_request_wait_begin(req); |
1244 | before = ktime_get_raw_ns(); | 1243 | before = ktime_get_raw_ns(); |
1245 | for (;;) { | 1244 | for (;;) { |
1246 | struct timer_list timer; | 1245 | struct timer_list timer; |
@@ -1259,7 +1258,7 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno, | |||
1259 | break; | 1258 | break; |
1260 | } | 1259 | } |
1261 | 1260 | ||
1262 | if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) { | 1261 | if (i915_gem_request_completed(req, false)) { |
1263 | ret = 0; | 1262 | ret = 0; |
1264 | break; | 1263 | break; |
1265 | } | 1264 | } |
@@ -1291,7 +1290,7 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno, | |||
1291 | } | 1290 | } |
1292 | } | 1291 | } |
1293 | now = ktime_get_raw_ns(); | 1292 | now = ktime_get_raw_ns(); |
1294 | trace_i915_gem_request_wait_end(ring, seqno); | 1293 | trace_i915_gem_request_wait_end(req); |
1295 | 1294 | ||
1296 | if (!irq_test_in_progress) | 1295 | if (!irq_test_in_progress) |
1297 | ring->irq_put(ring); | 1296 | ring->irq_put(ring); |
@@ -1318,32 +1317,40 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno, | |||
1318 | } | 1317 | } |
1319 | 1318 | ||
1320 | /** | 1319 | /** |
1321 | * Waits for a sequence number to be signaled, and cleans up the | 1320 | * Waits for a request to be signaled, and cleans up the |
1322 | * request and object lists appropriately for that event. | 1321 | * request and object lists appropriately for that event. |
1323 | */ | 1322 | */ |
1324 | int | 1323 | int |
1325 | i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno) | 1324 | i915_wait_request(struct drm_i915_gem_request *req) |
1326 | { | 1325 | { |
1327 | struct drm_device *dev = ring->dev; | 1326 | struct drm_device *dev; |
1328 | struct drm_i915_private *dev_priv = dev->dev_private; | 1327 | struct drm_i915_private *dev_priv; |
1329 | bool interruptible = dev_priv->mm.interruptible; | 1328 | bool interruptible; |
1330 | unsigned reset_counter; | 1329 | unsigned reset_counter; |
1331 | int ret; | 1330 | int ret; |
1332 | 1331 | ||
1332 | BUG_ON(req == NULL); | ||
1333 | |||
1334 | dev = req->ring->dev; | ||
1335 | dev_priv = dev->dev_private; | ||
1336 | interruptible = dev_priv->mm.interruptible; | ||
1337 | |||
1333 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 1338 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
1334 | BUG_ON(seqno == 0); | ||
1335 | 1339 | ||
1336 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible); | 1340 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible); |
1337 | if (ret) | 1341 | if (ret) |
1338 | return ret; | 1342 | return ret; |
1339 | 1343 | ||
1340 | ret = i915_gem_check_olr(ring, seqno); | 1344 | ret = i915_gem_check_olr(req); |
1341 | if (ret) | 1345 | if (ret) |
1342 | return ret; | 1346 | return ret; |
1343 | 1347 | ||
1344 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); | 1348 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); |
1345 | return __i915_wait_seqno(ring, seqno, reset_counter, interruptible, | 1349 | i915_gem_request_reference(req); |
1346 | NULL, NULL); | 1350 | ret = __i915_wait_request(req, reset_counter, |
1351 | interruptible, NULL, NULL); | ||
1352 | i915_gem_request_unreference(req); | ||
1353 | return ret; | ||
1347 | } | 1354 | } |
1348 | 1355 | ||
1349 | static int | 1356 | static int |
@@ -1355,11 +1362,11 @@ i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj) | |||
1355 | /* Manually manage the write flush as we may have not yet | 1362 | /* Manually manage the write flush as we may have not yet |
1356 | * retired the buffer. | 1363 | * retired the buffer. |
1357 | * | 1364 | * |
1358 | * Note that the last_write_seqno is always the earlier of | 1365 | * Note that the last_write_req is always the earlier of |
1359 | * the two (read/write) seqno, so if we haved successfully waited, | 1366 | * the two (read/write) requests, so if we haved successfully waited, |
1360 | * we know we have passed the last write. | 1367 | * we know we have passed the last write. |
1361 | */ | 1368 | */ |
1362 | obj->last_write_seqno = 0; | 1369 | i915_gem_request_assign(&obj->last_write_req, NULL); |
1363 | 1370 | ||
1364 | return 0; | 1371 | return 0; |
1365 | } | 1372 | } |
@@ -1372,15 +1379,14 @@ static __must_check int | |||
1372 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, | 1379 | i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, |
1373 | bool readonly) | 1380 | bool readonly) |
1374 | { | 1381 | { |
1375 | struct intel_engine_cs *ring = obj->ring; | 1382 | struct drm_i915_gem_request *req; |
1376 | u32 seqno; | ||
1377 | int ret; | 1383 | int ret; |
1378 | 1384 | ||
1379 | seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; | 1385 | req = readonly ? obj->last_write_req : obj->last_read_req; |
1380 | if (seqno == 0) | 1386 | if (!req) |
1381 | return 0; | 1387 | return 0; |
1382 | 1388 | ||
1383 | ret = i915_wait_seqno(ring, seqno); | 1389 | ret = i915_wait_request(req); |
1384 | if (ret) | 1390 | if (ret) |
1385 | return ret; | 1391 | return ret; |
1386 | 1392 | ||
@@ -1395,33 +1401,33 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, | |||
1395 | struct drm_i915_file_private *file_priv, | 1401 | struct drm_i915_file_private *file_priv, |
1396 | bool readonly) | 1402 | bool readonly) |
1397 | { | 1403 | { |
1404 | struct drm_i915_gem_request *req; | ||
1398 | struct drm_device *dev = obj->base.dev; | 1405 | struct drm_device *dev = obj->base.dev; |
1399 | struct drm_i915_private *dev_priv = dev->dev_private; | 1406 | struct drm_i915_private *dev_priv = dev->dev_private; |
1400 | struct intel_engine_cs *ring = obj->ring; | ||
1401 | unsigned reset_counter; | 1407 | unsigned reset_counter; |
1402 | u32 seqno; | ||
1403 | int ret; | 1408 | int ret; |
1404 | 1409 | ||
1405 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | 1410 | BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
1406 | BUG_ON(!dev_priv->mm.interruptible); | 1411 | BUG_ON(!dev_priv->mm.interruptible); |
1407 | 1412 | ||
1408 | seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; | 1413 | req = readonly ? obj->last_write_req : obj->last_read_req; |
1409 | if (seqno == 0) | 1414 | if (!req) |
1410 | return 0; | 1415 | return 0; |
1411 | 1416 | ||
1412 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, true); | 1417 | ret = i915_gem_check_wedge(&dev_priv->gpu_error, true); |
1413 | if (ret) | 1418 | if (ret) |
1414 | return ret; | 1419 | return ret; |
1415 | 1420 | ||
1416 | ret = i915_gem_check_olr(ring, seqno); | 1421 | ret = i915_gem_check_olr(req); |
1417 | if (ret) | 1422 | if (ret) |
1418 | return ret; | 1423 | return ret; |
1419 | 1424 | ||
1420 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); | 1425 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); |
1426 | i915_gem_request_reference(req); | ||
1421 | mutex_unlock(&dev->struct_mutex); | 1427 | mutex_unlock(&dev->struct_mutex); |
1422 | ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, | 1428 | ret = __i915_wait_request(req, reset_counter, true, NULL, file_priv); |
1423 | file_priv); | ||
1424 | mutex_lock(&dev->struct_mutex); | 1429 | mutex_lock(&dev->struct_mutex); |
1430 | i915_gem_request_unreference(req); | ||
1425 | if (ret) | 1431 | if (ret) |
1426 | return ret; | 1432 | return ret; |
1427 | 1433 | ||
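This hunk is the core motivation for refcounting requests at all: struct_mutex is dropped around __i915_wait_request(), so another thread may retire the request list in the meantime, and without a private reference the request could be freed mid-wait. Condensed, the pattern every converted waiter follows is:

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	i915_gem_request_reference(req);	/* keep req alive unlocked */
	mutex_unlock(&dev->struct_mutex);

	ret = __i915_wait_request(req, reset_counter, true, NULL, file_priv);

	mutex_lock(&dev->struct_mutex);
	i915_gem_request_unreference(req);	/* safe again under the lock */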
@@ -2250,14 +2256,18 @@ static void | |||
2250 | i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, | 2256 | i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, |
2251 | struct intel_engine_cs *ring) | 2257 | struct intel_engine_cs *ring) |
2252 | { | 2258 | { |
2253 | u32 seqno = intel_ring_get_seqno(ring); | 2259 | struct drm_i915_gem_request *req; |
2260 | struct intel_engine_cs *old_ring; | ||
2254 | 2261 | ||
2255 | BUG_ON(ring == NULL); | 2262 | BUG_ON(ring == NULL); |
2256 | if (obj->ring != ring && obj->last_write_seqno) { | 2263 | |
2257 | /* Keep the seqno relative to the current ring */ | 2264 | req = intel_ring_get_request(ring); |
2258 | obj->last_write_seqno = seqno; | 2265 | old_ring = i915_gem_request_get_ring(obj->last_read_req); |
2266 | |||
2267 | if (old_ring != ring && obj->last_write_req) { | ||
2268 | /* Keep the request relative to the current ring */ | ||
2269 | i915_gem_request_assign(&obj->last_write_req, req); | ||
2259 | } | 2270 | } |
2260 | obj->ring = ring; | ||
2261 | 2271 | ||
2262 | /* Add a reference if we're newly entering the active list. */ | 2272 | /* Add a reference if we're newly entering the active list. */ |
2263 | if (!obj->active) { | 2273 | if (!obj->active) { |
@@ -2267,7 +2277,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, | |||
2267 | 2277 | ||
2268 | list_move_tail(&obj->ring_list, &ring->active_list); | 2278 | list_move_tail(&obj->ring_list, &ring->active_list); |
2269 | 2279 | ||
2270 | obj->last_read_seqno = seqno; | 2280 | i915_gem_request_assign(&obj->last_read_req, req); |
2271 | } | 2281 | } |
2272 | 2282 | ||
2273 | void i915_vma_move_to_active(struct i915_vma *vma, | 2283 | void i915_vma_move_to_active(struct i915_vma *vma, |
@@ -2280,29 +2290,25 @@ void i915_vma_move_to_active(struct i915_vma *vma, | |||
2280 | static void | 2290 | static void |
2281 | i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) | 2291 | i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) |
2282 | { | 2292 | { |
2283 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | ||
2284 | struct i915_address_space *vm; | ||
2285 | struct i915_vma *vma; | 2293 | struct i915_vma *vma; |
2286 | 2294 | ||
2287 | BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); | 2295 | BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); |
2288 | BUG_ON(!obj->active); | 2296 | BUG_ON(!obj->active); |
2289 | 2297 | ||
2290 | list_for_each_entry(vm, &dev_priv->vm_list, global_link) { | 2298 | list_for_each_entry(vma, &obj->vma_list, vma_link) { |
2291 | vma = i915_gem_obj_to_vma(obj, vm); | 2299 | if (!list_empty(&vma->mm_list)) |
2292 | if (vma && !list_empty(&vma->mm_list)) | 2300 | list_move_tail(&vma->mm_list, &vma->vm->inactive_list); |
2293 | list_move_tail(&vma->mm_list, &vm->inactive_list); | ||
2294 | } | 2301 | } |
2295 | 2302 | ||
2296 | intel_fb_obj_flush(obj, true); | 2303 | intel_fb_obj_flush(obj, true); |
2297 | 2304 | ||
2298 | list_del_init(&obj->ring_list); | 2305 | list_del_init(&obj->ring_list); |
2299 | obj->ring = NULL; | ||
2300 | 2306 | ||
2301 | obj->last_read_seqno = 0; | 2307 | i915_gem_request_assign(&obj->last_read_req, NULL); |
2302 | obj->last_write_seqno = 0; | 2308 | i915_gem_request_assign(&obj->last_write_req, NULL); |
2303 | obj->base.write_domain = 0; | 2309 | obj->base.write_domain = 0; |
2304 | 2310 | ||
2305 | obj->last_fenced_seqno = 0; | 2311 | i915_gem_request_assign(&obj->last_fenced_req, NULL); |
2306 | 2312 | ||
2307 | obj->active = 0; | 2313 | obj->active = 0; |
2308 | drm_gem_object_unreference(&obj->base); | 2314 | drm_gem_object_unreference(&obj->base); |
@@ -2313,13 +2319,10 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) | |||
2313 | static void | 2319 | static void |
2314 | i915_gem_object_retire(struct drm_i915_gem_object *obj) | 2320 | i915_gem_object_retire(struct drm_i915_gem_object *obj) |
2315 | { | 2321 | { |
2316 | struct intel_engine_cs *ring = obj->ring; | 2322 | if (obj->last_read_req == NULL) |
2317 | |||
2318 | if (ring == NULL) | ||
2319 | return; | 2323 | return; |
2320 | 2324 | ||
2321 | if (i915_seqno_passed(ring->get_seqno(ring, true), | 2325 | if (i915_gem_request_completed(obj->last_read_req, true)) |
2322 | obj->last_read_seqno)) | ||
2323 | i915_gem_object_move_to_inactive(obj); | 2326 | i915_gem_object_move_to_inactive(obj); |
2324 | } | 2327 | } |
2325 | 2328 | ||
@@ -2395,8 +2398,7 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) | |||
2395 | 2398 | ||
2396 | int __i915_add_request(struct intel_engine_cs *ring, | 2399 | int __i915_add_request(struct intel_engine_cs *ring, |
2397 | struct drm_file *file, | 2400 | struct drm_file *file, |
2398 | struct drm_i915_gem_object *obj, | 2401 | struct drm_i915_gem_object *obj) |
2399 | u32 *out_seqno) | ||
2400 | { | 2402 | { |
2401 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 2403 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
2402 | struct drm_i915_gem_request *request; | 2404 | struct drm_i915_gem_request *request; |
@@ -2404,7 +2406,7 @@ int __i915_add_request(struct intel_engine_cs *ring, | |||
2404 | u32 request_ring_position, request_start; | 2406 | u32 request_ring_position, request_start; |
2405 | int ret; | 2407 | int ret; |
2406 | 2408 | ||
2407 | request = ring->preallocated_lazy_request; | 2409 | request = ring->outstanding_lazy_request; |
2408 | if (WARN_ON(request == NULL)) | 2410 | if (WARN_ON(request == NULL)) |
2409 | return -ENOMEM; | 2411 | return -ENOMEM; |
2410 | 2412 | ||
@@ -2449,8 +2451,6 @@ int __i915_add_request(struct intel_engine_cs *ring, | |||
2449 | return ret; | 2451 | return ret; |
2450 | } | 2452 | } |
2451 | 2453 | ||
2452 | request->seqno = intel_ring_get_seqno(ring); | ||
2453 | request->ring = ring; | ||
2454 | request->head = request_start; | 2454 | request->head = request_start; |
2455 | request->tail = request_ring_position; | 2455 | request->tail = request_ring_position; |
2456 | 2456 | ||
@@ -2485,9 +2485,8 @@ int __i915_add_request(struct intel_engine_cs *ring, | |||
2485 | spin_unlock(&file_priv->mm.lock); | 2485 | spin_unlock(&file_priv->mm.lock); |
2486 | } | 2486 | } |
2487 | 2487 | ||
2488 | trace_i915_gem_request_add(ring, request->seqno); | 2488 | trace_i915_gem_request_add(request); |
2489 | ring->outstanding_lazy_seqno = 0; | 2489 | ring->outstanding_lazy_request = NULL; |
2490 | ring->preallocated_lazy_request = NULL; | ||
2491 | 2490 | ||
2492 | i915_queue_hangcheck(ring->dev); | 2491 | i915_queue_hangcheck(ring->dev); |
2493 | 2492 | ||
@@ -2497,8 +2496,6 @@ int __i915_add_request(struct intel_engine_cs *ring, | |||
2497 | round_jiffies_up_relative(HZ)); | 2496 | round_jiffies_up_relative(HZ)); |
2498 | intel_mark_busy(dev_priv->dev); | 2497 | intel_mark_busy(dev_priv->dev); |
2499 | 2498 | ||
2500 | if (out_seqno) | ||
2501 | *out_seqno = request->seqno; | ||
2502 | return 0; | 2499 | return 0; |
2503 | } | 2500 | } |
2504 | 2501 | ||
@@ -2562,33 +2559,39 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv, | |||
2562 | 2559 | ||
2563 | static void i915_gem_free_request(struct drm_i915_gem_request *request) | 2560 | static void i915_gem_free_request(struct drm_i915_gem_request *request) |
2564 | { | 2561 | { |
2565 | struct intel_context *ctx = request->ctx; | ||
2566 | |||
2567 | list_del(&request->list); | 2562 | list_del(&request->list); |
2568 | i915_gem_request_remove_from_client(request); | 2563 | i915_gem_request_remove_from_client(request); |
2569 | 2564 | ||
2565 | i915_gem_request_unreference(request); | ||
2566 | } | ||
2567 | |||
2568 | void i915_gem_request_free(struct kref *req_ref) | ||
2569 | { | ||
2570 | struct drm_i915_gem_request *req = container_of(req_ref, | ||
2571 | typeof(*req), ref); | ||
2572 | struct intel_context *ctx = req->ctx; | ||
2573 | |||
2570 | if (ctx) { | 2574 | if (ctx) { |
2571 | if (i915.enable_execlists) { | 2575 | if (i915.enable_execlists) { |
2572 | struct intel_engine_cs *ring = request->ring; | 2576 | struct intel_engine_cs *ring = req->ring; |
2573 | 2577 | ||
2574 | if (ctx != ring->default_context) | 2578 | if (ctx != ring->default_context) |
2575 | intel_lr_context_unpin(ring, ctx); | 2579 | intel_lr_context_unpin(ring, ctx); |
2576 | } | 2580 | } |
2581 | |||
2577 | i915_gem_context_unreference(ctx); | 2582 | i915_gem_context_unreference(ctx); |
2578 | } | 2583 | } |
2579 | kfree(request); | 2584 | |
2585 | kfree(req); | ||
2580 | } | 2586 | } |
2581 | 2587 | ||
2582 | struct drm_i915_gem_request * | 2588 | struct drm_i915_gem_request * |
2583 | i915_gem_find_active_request(struct intel_engine_cs *ring) | 2589 | i915_gem_find_active_request(struct intel_engine_cs *ring) |
2584 | { | 2590 | { |
2585 | struct drm_i915_gem_request *request; | 2591 | struct drm_i915_gem_request *request; |
2586 | u32 completed_seqno; | ||
2587 | |||
2588 | completed_seqno = ring->get_seqno(ring, false); | ||
2589 | 2592 | ||
2590 | list_for_each_entry(request, &ring->request_list, list) { | 2593 | list_for_each_entry(request, &ring->request_list, list) { |
2591 | if (i915_seqno_passed(completed_seqno, request->seqno)) | 2594 | if (i915_gem_request_completed(request, false)) |
2592 | continue; | 2595 | continue; |
2593 | 2596 | ||
2594 | return request; | 2597 | return request; |
@@ -2663,10 +2666,8 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, | |||
2663 | i915_gem_free_request(request); | 2666 | i915_gem_free_request(request); |
2664 | } | 2667 | } |
2665 | 2668 | ||
2666 | /* These may not have been flush before the reset, do so now */ | 2669 | /* This may not have been flushed before the reset, so clean it now */ |
2667 | kfree(ring->preallocated_lazy_request); | 2670 | i915_gem_request_assign(&ring->outstanding_lazy_request, NULL); |
2668 | ring->preallocated_lazy_request = NULL; | ||
2669 | ring->outstanding_lazy_seqno = 0; | ||
2670 | } | 2671 | } |
2671 | 2672 | ||
2672 | void i915_gem_restore_fences(struct drm_device *dev) | 2673 | void i915_gem_restore_fences(struct drm_device *dev) |
@@ -2718,15 +2719,11 @@ void i915_gem_reset(struct drm_device *dev) | |||
2718 | void | 2719 | void |
2719 | i915_gem_retire_requests_ring(struct intel_engine_cs *ring) | 2720 | i915_gem_retire_requests_ring(struct intel_engine_cs *ring) |
2720 | { | 2721 | { |
2721 | uint32_t seqno; | ||
2722 | |||
2723 | if (list_empty(&ring->request_list)) | 2722 | if (list_empty(&ring->request_list)) |
2724 | return; | 2723 | return; |
2725 | 2724 | ||
2726 | WARN_ON(i915_verify_lists(ring->dev)); | 2725 | WARN_ON(i915_verify_lists(ring->dev)); |
2727 | 2726 | ||
2728 | seqno = ring->get_seqno(ring, true); | ||
2729 | |||
2730 | /* Move any buffers on the active list that are no longer referenced | 2727 | /* Move any buffers on the active list that are no longer referenced |
2731 | * by the ringbuffer to the flushing/inactive lists as appropriate, | 2728 | * by the ringbuffer to the flushing/inactive lists as appropriate, |
2732 | * before we free the context associated with the requests. | 2729 | * before we free the context associated with the requests. |
@@ -2738,7 +2735,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) | |||
2738 | struct drm_i915_gem_object, | 2735 | struct drm_i915_gem_object, |
2739 | ring_list); | 2736 | ring_list); |
2740 | 2737 | ||
2741 | if (!i915_seqno_passed(seqno, obj->last_read_seqno)) | 2738 | if (!i915_gem_request_completed(obj->last_read_req, true)) |
2742 | break; | 2739 | break; |
2743 | 2740 | ||
2744 | i915_gem_object_move_to_inactive(obj); | 2741 | i915_gem_object_move_to_inactive(obj); |
@@ -2753,10 +2750,10 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) | |||
2753 | struct drm_i915_gem_request, | 2750 | struct drm_i915_gem_request, |
2754 | list); | 2751 | list); |
2755 | 2752 | ||
2756 | if (!i915_seqno_passed(seqno, request->seqno)) | 2753 | if (!i915_gem_request_completed(request, true)) |
2757 | break; | 2754 | break; |
2758 | 2755 | ||
2759 | trace_i915_gem_request_retire(ring, request->seqno); | 2756 | trace_i915_gem_request_retire(request); |
2760 | 2757 | ||
2761 | /* This is one of the few common intersection points | 2758 | /* This is one of the few common intersection points |
2762 | * between legacy ringbuffer submission and execlists: | 2759 | * between legacy ringbuffer submission and execlists: |
@@ -2779,10 +2776,10 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) | |||
2779 | i915_gem_free_request(request); | 2776 | i915_gem_free_request(request); |
2780 | } | 2777 | } |
2781 | 2778 | ||
2782 | if (unlikely(ring->trace_irq_seqno && | 2779 | if (unlikely(ring->trace_irq_req && |
2783 | i915_seqno_passed(seqno, ring->trace_irq_seqno))) { | 2780 | i915_gem_request_completed(ring->trace_irq_req, true))) { |
2784 | ring->irq_put(ring); | 2781 | ring->irq_put(ring); |
2785 | ring->trace_irq_seqno = 0; | 2782 | i915_gem_request_assign(&ring->trace_irq_req, NULL); |
2786 | } | 2783 | } |
2787 | 2784 | ||
2788 | WARN_ON(i915_verify_lists(ring->dev)); | 2785 | WARN_ON(i915_verify_lists(ring->dev)); |
@@ -2854,14 +2851,17 @@ i915_gem_idle_work_handler(struct work_struct *work) | |||
2854 | static int | 2851 | static int |
2855 | i915_gem_object_flush_active(struct drm_i915_gem_object *obj) | 2852 | i915_gem_object_flush_active(struct drm_i915_gem_object *obj) |
2856 | { | 2853 | { |
2854 | struct intel_engine_cs *ring; | ||
2857 | int ret; | 2855 | int ret; |
2858 | 2856 | ||
2859 | if (obj->active) { | 2857 | if (obj->active) { |
2860 | ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno); | 2858 | ring = i915_gem_request_get_ring(obj->last_read_req); |
2859 | |||
2860 | ret = i915_gem_check_olr(obj->last_read_req); | ||
2861 | if (ret) | 2861 | if (ret) |
2862 | return ret; | 2862 | return ret; |
2863 | 2863 | ||
2864 | i915_gem_retire_requests_ring(obj->ring); | 2864 | i915_gem_retire_requests_ring(ring); |
2865 | } | 2865 | } |
2866 | 2866 | ||
2867 | return 0; | 2867 | return 0; |
@@ -2895,9 +2895,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
2895 | struct drm_i915_private *dev_priv = dev->dev_private; | 2895 | struct drm_i915_private *dev_priv = dev->dev_private; |
2896 | struct drm_i915_gem_wait *args = data; | 2896 | struct drm_i915_gem_wait *args = data; |
2897 | struct drm_i915_gem_object *obj; | 2897 | struct drm_i915_gem_object *obj; |
2898 | struct intel_engine_cs *ring = NULL; | 2898 | struct drm_i915_gem_request *req; |
2899 | unsigned reset_counter; | 2899 | unsigned reset_counter; |
2900 | u32 seqno = 0; | ||
2901 | int ret = 0; | 2900 | int ret = 0; |
2902 | 2901 | ||
2903 | if (args->flags != 0) | 2902 | if (args->flags != 0) |
@@ -2918,13 +2917,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
2918 | if (ret) | 2917 | if (ret) |
2919 | goto out; | 2918 | goto out; |
2920 | 2919 | ||
2921 | if (obj->active) { | 2920 | if (!obj->active || !obj->last_read_req) |
2922 | seqno = obj->last_read_seqno; | 2921 | goto out; |
2923 | ring = obj->ring; | ||
2924 | } | ||
2925 | 2922 | ||
2926 | if (seqno == 0) | 2923 | req = obj->last_read_req; |
2927 | goto out; | ||
2928 | 2924 | ||
2929 | /* Do this after OLR check to make sure we make forward progress polling | 2925 | /* Do this after OLR check to make sure we make forward progress polling |
2930 | * on this IOCTL with a timeout <=0 (like busy ioctl) | 2926 | * on this IOCTL with a timeout <=0 (like busy ioctl) |
@@ -2936,10 +2932,15 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
2936 | 2932 | ||
2937 | drm_gem_object_unreference(&obj->base); | 2933 | drm_gem_object_unreference(&obj->base); |
2938 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); | 2934 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); |
2935 | i915_gem_request_reference(req); | ||
2939 | mutex_unlock(&dev->struct_mutex); | 2936 | mutex_unlock(&dev->struct_mutex); |
2940 | 2937 | ||
2941 | return __i915_wait_seqno(ring, seqno, reset_counter, true, | 2938 | ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns, |
2942 | &args->timeout_ns, file->driver_priv); | 2939 | file->driver_priv); |
2940 | mutex_lock(&dev->struct_mutex); | ||
2941 | i915_gem_request_unreference(req); | ||
2942 | mutex_unlock(&dev->struct_mutex); | ||
2943 | return ret; | ||
2943 | 2944 | ||
2944 | out: | 2945 | out: |
2945 | drm_gem_object_unreference(&obj->base); | 2946 | drm_gem_object_unreference(&obj->base); |
@@ -2963,10 +2964,12 @@ int | |||
2963 | i915_gem_object_sync(struct drm_i915_gem_object *obj, | 2964 | i915_gem_object_sync(struct drm_i915_gem_object *obj, |
2964 | struct intel_engine_cs *to) | 2965 | struct intel_engine_cs *to) |
2965 | { | 2966 | { |
2966 | struct intel_engine_cs *from = obj->ring; | 2967 | struct intel_engine_cs *from; |
2967 | u32 seqno; | 2968 | u32 seqno; |
2968 | int ret, idx; | 2969 | int ret, idx; |
2969 | 2970 | ||
2971 | from = i915_gem_request_get_ring(obj->last_read_req); | ||
2972 | |||
2970 | if (from == NULL || to == from) | 2973 | if (from == NULL || to == from) |
2971 | return 0; | 2974 | return 0; |
2972 | 2975 | ||
@@ -2975,24 +2978,25 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj, | |||
2975 | 2978 | ||
2976 | idx = intel_ring_sync_index(from, to); | 2979 | idx = intel_ring_sync_index(from, to); |
2977 | 2980 | ||
2978 | seqno = obj->last_read_seqno; | 2981 | seqno = i915_gem_request_get_seqno(obj->last_read_req); |
2979 | /* Optimization: Avoid semaphore sync when we are sure we already | 2982 | /* Optimization: Avoid semaphore sync when we are sure we already |
2980 | * waited for an object with higher seqno */ | 2983 | * waited for an object with higher seqno */ |
2981 | if (seqno <= from->semaphore.sync_seqno[idx]) | 2984 | if (seqno <= from->semaphore.sync_seqno[idx]) |
2982 | return 0; | 2985 | return 0; |
2983 | 2986 | ||
2984 | ret = i915_gem_check_olr(obj->ring, seqno); | 2987 | ret = i915_gem_check_olr(obj->last_read_req); |
2985 | if (ret) | 2988 | if (ret) |
2986 | return ret; | 2989 | return ret; |
2987 | 2990 | ||
2988 | trace_i915_gem_ring_sync_to(from, to, seqno); | 2991 | trace_i915_gem_ring_sync_to(from, to, obj->last_read_req); |
2989 | ret = to->semaphore.sync_to(to, from, seqno); | 2992 | ret = to->semaphore.sync_to(to, from, seqno); |
2990 | if (!ret) | 2993 | if (!ret) |
2991 | /* We use last_read_seqno because sync_to() | 2994 | /* We use last_read_req because sync_to() |
2992 | * might have just caused seqno wrap under | 2995 | * might have just caused seqno wrap under |
2993 | * the radar. | 2996 | * the radar. |
2994 | */ | 2997 | */ |
2995 | from->semaphore.sync_seqno[idx] = obj->last_read_seqno; | 2998 | from->semaphore.sync_seqno[idx] = |
2999 | i915_gem_request_get_seqno(obj->last_read_req); | ||
2996 | 3000 | ||
2997 | return ret; | 3001 | return ret; |
2998 | } | 3002 | } |
@@ -3048,10 +3052,8 @@ int i915_vma_unbind(struct i915_vma *vma) | |||
3048 | * cause memory corruption through use-after-free. | 3052 | * cause memory corruption through use-after-free. |
3049 | */ | 3053 | */ |
3050 | 3054 | ||
3051 | /* Throw away the active reference before moving to the unbound list */ | 3055 | if (i915_is_ggtt(vma->vm) && |
3052 | i915_gem_object_retire(obj); | 3056 | vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { |
3053 | |||
3054 | if (i915_is_ggtt(vma->vm)) { | ||
3055 | i915_gem_object_finish_gtt(obj); | 3057 | i915_gem_object_finish_gtt(obj); |
3056 | 3058 | ||
3057 | /* release the fence reg _after_ flushing */ | 3059 | /* release the fence reg _after_ flushing */ |
@@ -3065,8 +3067,15 @@ int i915_vma_unbind(struct i915_vma *vma) | |||
3065 | vma->unbind_vma(vma); | 3067 | vma->unbind_vma(vma); |
3066 | 3068 | ||
3067 | list_del_init(&vma->mm_list); | 3069 | list_del_init(&vma->mm_list); |
3068 | if (i915_is_ggtt(vma->vm)) | 3070 | if (i915_is_ggtt(vma->vm)) { |
3069 | obj->map_and_fenceable = false; | 3071 | if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { |
3072 | obj->map_and_fenceable = false; | ||
3073 | } else if (vma->ggtt_view.pages) { | ||
3074 | sg_free_table(vma->ggtt_view.pages); | ||
3075 | kfree(vma->ggtt_view.pages); | ||
3076 | vma->ggtt_view.pages = NULL; | ||
3077 | } | ||
3078 | } | ||
3070 | 3079 | ||
3071 | drm_mm_remove_node(&vma->node); | 3080 | drm_mm_remove_node(&vma->node); |
3072 | i915_gem_vma_destroy(vma); | 3081 | i915_gem_vma_destroy(vma); |
@@ -3074,6 +3083,10 @@ int i915_vma_unbind(struct i915_vma *vma) | |||
3074 | /* Since the unbound list is global, only move to that list if | 3083 | /* Since the unbound list is global, only move to that list if |
3075 | * no more VMAs exist. */ | 3084 | * no more VMAs exist. */ |
3076 | if (list_empty(&obj->vma_list)) { | 3085 | if (list_empty(&obj->vma_list)) { |
3086 | /* Throw away the active reference before | ||
3087 | * moving to the unbound list. */ | ||
3088 | i915_gem_object_retire(obj); | ||
3089 | |||
3077 | i915_gem_gtt_finish_object(obj); | 3090 | i915_gem_gtt_finish_object(obj); |
3078 | list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); | 3091 | list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); |
3079 | } | 3092 | } |
@@ -3257,17 +3270,12 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg, | |||
3257 | "bogus fence setup with stride: 0x%x, tiling mode: %i\n", | 3270 | "bogus fence setup with stride: 0x%x, tiling mode: %i\n", |
3258 | obj->stride, obj->tiling_mode); | 3271 | obj->stride, obj->tiling_mode); |
3259 | 3272 | ||
3260 | switch (INTEL_INFO(dev)->gen) { | 3273 | if (IS_GEN2(dev)) |
3261 | case 9: | 3274 | i830_write_fence_reg(dev, reg, obj); |
3262 | case 8: | 3275 | else if (IS_GEN3(dev)) |
3263 | case 7: | 3276 | i915_write_fence_reg(dev, reg, obj); |
3264 | case 6: | 3277 | else if (INTEL_INFO(dev)->gen >= 4) |
3265 | case 5: | 3278 | i965_write_fence_reg(dev, reg, obj); |
3266 | case 4: i965_write_fence_reg(dev, reg, obj); break; | ||
3267 | case 3: i915_write_fence_reg(dev, reg, obj); break; | ||
3268 | case 2: i830_write_fence_reg(dev, reg, obj); break; | ||
3269 | default: BUG(); | ||
3270 | } | ||
3271 | 3279 | ||
3272 | /* And similarly be paranoid that no direct access to this region | 3280 | /* And similarly be paranoid that no direct access to this region |
3273 | * is reordered to before the fence is installed. | 3281 | * is reordered to before the fence is installed. |
@@ -3306,12 +3314,12 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, | |||
3306 | static int | 3314 | static int |
3307 | i915_gem_object_wait_fence(struct drm_i915_gem_object *obj) | 3315 | i915_gem_object_wait_fence(struct drm_i915_gem_object *obj) |
3308 | { | 3316 | { |
3309 | if (obj->last_fenced_seqno) { | 3317 | if (obj->last_fenced_req) { |
3310 | int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); | 3318 | int ret = i915_wait_request(obj->last_fenced_req); |
3311 | if (ret) | 3319 | if (ret) |
3312 | return ret; | 3320 | return ret; |
3313 | 3321 | ||
3314 | obj->last_fenced_seqno = 0; | 3322 | i915_gem_request_assign(&obj->last_fenced_req, NULL); |
3315 | } | 3323 | } |
3316 | 3324 | ||
3317 | return 0; | 3325 | return 0; |
@@ -3484,7 +3492,8 @@ static struct i915_vma * | |||
3484 | i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, | 3492 | i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, |
3485 | struct i915_address_space *vm, | 3493 | struct i915_address_space *vm, |
3486 | unsigned alignment, | 3494 | unsigned alignment, |
3487 | uint64_t flags) | 3495 | uint64_t flags, |
3496 | const struct i915_ggtt_view *view) | ||
3488 | { | 3497 | { |
3489 | struct drm_device *dev = obj->base.dev; | 3498 | struct drm_device *dev = obj->base.dev; |
3490 | struct drm_i915_private *dev_priv = dev->dev_private; | 3499 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -3534,7 +3543,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, | |||
3534 | 3543 | ||
3535 | i915_gem_object_pin_pages(obj); | 3544 | i915_gem_object_pin_pages(obj); |
3536 | 3545 | ||
3537 | vma = i915_gem_obj_lookup_or_create_vma(obj, vm); | 3546 | vma = i915_gem_obj_lookup_or_create_vma_view(obj, vm, view); |
3538 | if (IS_ERR(vma)) | 3547 | if (IS_ERR(vma)) |
3539 | goto err_unpin; | 3548 | goto err_unpin; |
3540 | 3549 | ||
@@ -3564,15 +3573,19 @@ search_free: | |||
3564 | if (ret) | 3573 | if (ret) |
3565 | goto err_remove_node; | 3574 | goto err_remove_node; |
3566 | 3575 | ||
3576 | trace_i915_vma_bind(vma, flags); | ||
3577 | ret = i915_vma_bind(vma, obj->cache_level, | ||
3578 | flags & PIN_GLOBAL ? GLOBAL_BIND : 0); | ||
3579 | if (ret) | ||
3580 | goto err_finish_gtt; | ||
3581 | |||
3567 | list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); | 3582 | list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); |
3568 | list_add_tail(&vma->mm_list, &vm->inactive_list); | 3583 | list_add_tail(&vma->mm_list, &vm->inactive_list); |
3569 | 3584 | ||
3570 | trace_i915_vma_bind(vma, flags); | ||
3571 | vma->bind_vma(vma, obj->cache_level, | ||
3572 | flags & PIN_GLOBAL ? GLOBAL_BIND : 0); | ||
3573 | |||
3574 | return vma; | 3585 | return vma; |
3575 | 3586 | ||
3587 | err_finish_gtt: | ||
3588 | i915_gem_gtt_finish_object(obj); | ||
3576 | err_remove_node: | 3589 | err_remove_node: |
3577 | drm_mm_remove_node(&vma->node); | 3590 | drm_mm_remove_node(&vma->node); |
3578 | err_free_vma: | 3591 | err_free_vma: |
@@ -3775,9 +3788,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, | |||
3775 | } | 3788 | } |
3776 | 3789 | ||
3777 | list_for_each_entry(vma, &obj->vma_list, vma_link) | 3790 | list_for_each_entry(vma, &obj->vma_list, vma_link) |
3778 | if (drm_mm_node_allocated(&vma->node)) | 3791 | if (drm_mm_node_allocated(&vma->node)) { |
3779 | vma->bind_vma(vma, cache_level, | 3792 | ret = i915_vma_bind(vma, cache_level, |
3780 | vma->bound & GLOBAL_BIND); | 3793 | vma->bound & GLOBAL_BIND); |
3794 | if (ret) | ||
3795 | return ret; | ||
3796 | } | ||
3781 | } | 3797 | } |
3782 | 3798 | ||
3783 | list_for_each_entry(vma, &obj->vma_list, vma_link) | 3799 | list_for_each_entry(vma, &obj->vma_list, vma_link) |
@@ -3896,18 +3912,14 @@ static bool is_pin_display(struct drm_i915_gem_object *obj) | |||
3896 | if (!vma) | 3912 | if (!vma) |
3897 | return false; | 3913 | return false; |
3898 | 3914 | ||
3899 | /* There are 3 sources that pin objects: | 3915 | /* There are 2 sources that pin objects: |
3900 | * 1. The display engine (scanouts, sprites, cursors); | 3916 | * 1. The display engine (scanouts, sprites, cursors); |
3901 | * 2. Reservations for execbuffer; | 3917 | * 2. Reservations for execbuffer; |
3902 | * 3. The user. | ||
3903 | * | 3918 | * |
3904 | * We can ignore reservations as we hold the struct_mutex and | 3919 | * We can ignore reservations as we hold the struct_mutex and |
3905 | * are only called outside of the reservation path. The user | 3920 | * are only called outside of the reservation path. |
3906 | * can only increment pin_count once, and so if after | ||
3907 | * subtracting the potential reference by the user, any pin_count | ||
3908 | * remains, it must be due to another use by the display engine. | ||
3909 | */ | 3921 | */ |
3910 | return vma->pin_count - !!obj->user_pin_count; | 3922 | return vma->pin_count; |
3911 | } | 3923 | } |
3912 | 3924 | ||
3913 | /* | 3925 | /* |
@@ -3924,7 +3936,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, | |||
3924 | bool was_pin_display; | 3936 | bool was_pin_display; |
3925 | int ret; | 3937 | int ret; |
3926 | 3938 | ||
3927 | if (pipelined != obj->ring) { | 3939 | if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) { |
3928 | ret = i915_gem_object_sync(obj, pipelined); | 3940 | ret = i915_gem_object_sync(obj, pipelined); |
3929 | if (ret) | 3941 | if (ret) |
3930 | return ret; | 3942 | return ret; |
@@ -4076,10 +4088,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) | |||
4076 | struct drm_i915_private *dev_priv = dev->dev_private; | 4088 | struct drm_i915_private *dev_priv = dev->dev_private; |
4077 | struct drm_i915_file_private *file_priv = file->driver_priv; | 4089 | struct drm_i915_file_private *file_priv = file->driver_priv; |
4078 | unsigned long recent_enough = jiffies - msecs_to_jiffies(20); | 4090 | unsigned long recent_enough = jiffies - msecs_to_jiffies(20); |
4079 | struct drm_i915_gem_request *request; | 4091 | struct drm_i915_gem_request *request, *target = NULL; |
4080 | struct intel_engine_cs *ring = NULL; | ||
4081 | unsigned reset_counter; | 4092 | unsigned reset_counter; |
4082 | u32 seqno = 0; | ||
4083 | int ret; | 4093 | int ret; |
4084 | 4094 | ||
4085 | ret = i915_gem_wait_for_error(&dev_priv->gpu_error); | 4095 | ret = i915_gem_wait_for_error(&dev_priv->gpu_error); |
@@ -4095,19 +4105,24 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) | |||
4095 | if (time_after_eq(request->emitted_jiffies, recent_enough)) | 4105 | if (time_after_eq(request->emitted_jiffies, recent_enough)) |
4096 | break; | 4106 | break; |
4097 | 4107 | ||
4098 | ring = request->ring; | 4108 | target = request; |
4099 | seqno = request->seqno; | ||
4100 | } | 4109 | } |
4101 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); | 4110 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); |
4111 | if (target) | ||
4112 | i915_gem_request_reference(target); | ||
4102 | spin_unlock(&file_priv->mm.lock); | 4113 | spin_unlock(&file_priv->mm.lock); |
4103 | 4114 | ||
4104 | if (seqno == 0) | 4115 | if (target == NULL) |
4105 | return 0; | 4116 | return 0; |
4106 | 4117 | ||
4107 | ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL); | 4118 | ret = __i915_wait_request(target, reset_counter, true, NULL, NULL); |
4108 | if (ret == 0) | 4119 | if (ret == 0) |
4109 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); | 4120 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); |
4110 | 4121 | ||
4122 | mutex_lock(&dev->struct_mutex); | ||
4123 | i915_gem_request_unreference(target); | ||
4124 | mutex_unlock(&dev->struct_mutex); | ||
4125 | |||
4111 | return ret; | 4126 | return ret; |
4112 | } | 4127 | } |
4113 | 4128 | ||
@@ -4131,10 +4146,11 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags) | |||
4131 | } | 4146 | } |
4132 | 4147 | ||
4133 | int | 4148 | int |
4134 | i915_gem_object_pin(struct drm_i915_gem_object *obj, | 4149 | i915_gem_object_pin_view(struct drm_i915_gem_object *obj, |
4135 | struct i915_address_space *vm, | 4150 | struct i915_address_space *vm, |
4136 | uint32_t alignment, | 4151 | uint32_t alignment, |
4137 | uint64_t flags) | 4152 | uint64_t flags, |
4153 | const struct i915_ggtt_view *view) | ||
4138 | { | 4154 | { |
4139 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; | 4155 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
4140 | struct i915_vma *vma; | 4156 | struct i915_vma *vma; |
@@ -4150,7 +4166,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, | |||
4150 | if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE)) | 4166 | if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE)) |
4151 | return -EINVAL; | 4167 | return -EINVAL; |
4152 | 4168 | ||
4153 | vma = i915_gem_obj_to_vma(obj, vm); | 4169 | vma = i915_gem_obj_to_vma_view(obj, vm, view); |
4154 | if (vma) { | 4170 | if (vma) { |
4155 | if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) | 4171 | if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) |
4156 | return -EBUSY; | 4172 | return -EBUSY; |
@@ -4160,7 +4176,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, | |||
4160 | "bo is already pinned with incorrect alignment:" | 4176 | "bo is already pinned with incorrect alignment:" |
4161 | " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," | 4177 | " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," |
4162 | " obj->map_and_fenceable=%d\n", | 4178 | " obj->map_and_fenceable=%d\n", |
4163 | i915_gem_obj_offset(obj, vm), alignment, | 4179 | i915_gem_obj_offset_view(obj, vm, view->type), |
4180 | alignment, | ||
4164 | !!(flags & PIN_MAPPABLE), | 4181 | !!(flags & PIN_MAPPABLE), |
4165 | obj->map_and_fenceable); | 4182 | obj->map_and_fenceable); |
4166 | ret = i915_vma_unbind(vma); | 4183 | ret = i915_vma_unbind(vma); |
@@ -4173,13 +4190,17 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, | |||
4173 | 4190 | ||
4174 | bound = vma ? vma->bound : 0; | 4191 | bound = vma ? vma->bound : 0; |
4175 | if (vma == NULL || !drm_mm_node_allocated(&vma->node)) { | 4192 | if (vma == NULL || !drm_mm_node_allocated(&vma->node)) { |
4176 | vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags); | 4193 | vma = i915_gem_object_bind_to_vm(obj, vm, alignment, |
4194 | flags, view); | ||
4177 | if (IS_ERR(vma)) | 4195 | if (IS_ERR(vma)) |
4178 | return PTR_ERR(vma); | 4196 | return PTR_ERR(vma); |
4179 | } | 4197 | } |
4180 | 4198 | ||
4181 | if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) | 4199 | if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) { |
4182 | vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND); | 4200 | ret = i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND); |
4201 | if (ret) | ||
4202 | return ret; | ||
4203 | } | ||
4183 | 4204 | ||
4184 | if ((bound ^ vma->bound) & GLOBAL_BIND) { | 4205 | if ((bound ^ vma->bound) & GLOBAL_BIND) { |
4185 | bool mappable, fenceable; | 4206 | bool mappable, fenceable; |
@@ -4251,102 +4272,6 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) | |||
4251 | } | 4272 | } |
4252 | 4273 | ||
4253 | int | 4274 | int |
4254 | i915_gem_pin_ioctl(struct drm_device *dev, void *data, | ||
4255 | struct drm_file *file) | ||
4256 | { | ||
4257 | struct drm_i915_gem_pin *args = data; | ||
4258 | struct drm_i915_gem_object *obj; | ||
4259 | int ret; | ||
4260 | |||
4261 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
4262 | return -ENODEV; | ||
4263 | |||
4264 | ret = i915_mutex_lock_interruptible(dev); | ||
4265 | if (ret) | ||
4266 | return ret; | ||
4267 | |||
4268 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | ||
4269 | if (&obj->base == NULL) { | ||
4270 | ret = -ENOENT; | ||
4271 | goto unlock; | ||
4272 | } | ||
4273 | |||
4274 | if (obj->madv != I915_MADV_WILLNEED) { | ||
4275 | DRM_DEBUG("Attempting to pin a purgeable buffer\n"); | ||
4276 | ret = -EFAULT; | ||
4277 | goto out; | ||
4278 | } | ||
4279 | |||
4280 | if (obj->pin_filp != NULL && obj->pin_filp != file) { | ||
4281 | DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n", | ||
4282 | args->handle); | ||
4283 | ret = -EINVAL; | ||
4284 | goto out; | ||
4285 | } | ||
4286 | |||
4287 | if (obj->user_pin_count == ULONG_MAX) { | ||
4288 | ret = -EBUSY; | ||
4289 | goto out; | ||
4290 | } | ||
4291 | |||
4292 | if (obj->user_pin_count == 0) { | ||
4293 | ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE); | ||
4294 | if (ret) | ||
4295 | goto out; | ||
4296 | } | ||
4297 | |||
4298 | obj->user_pin_count++; | ||
4299 | obj->pin_filp = file; | ||
4300 | |||
4301 | args->offset = i915_gem_obj_ggtt_offset(obj); | ||
4302 | out: | ||
4303 | drm_gem_object_unreference(&obj->base); | ||
4304 | unlock: | ||
4305 | mutex_unlock(&dev->struct_mutex); | ||
4306 | return ret; | ||
4307 | } | ||
4308 | |||
4309 | int | ||
4310 | i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | ||
4311 | struct drm_file *file) | ||
4312 | { | ||
4313 | struct drm_i915_gem_pin *args = data; | ||
4314 | struct drm_i915_gem_object *obj; | ||
4315 | int ret; | ||
4316 | |||
4317 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | ||
4318 | return -ENODEV; | ||
4319 | |||
4320 | ret = i915_mutex_lock_interruptible(dev); | ||
4321 | if (ret) | ||
4322 | return ret; | ||
4323 | |||
4324 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | ||
4325 | if (&obj->base == NULL) { | ||
4326 | ret = -ENOENT; | ||
4327 | goto unlock; | ||
4328 | } | ||
4329 | |||
4330 | if (obj->pin_filp != file) { | ||
4331 | DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", | ||
4332 | args->handle); | ||
4333 | ret = -EINVAL; | ||
4334 | goto out; | ||
4335 | } | ||
4336 | obj->user_pin_count--; | ||
4337 | if (obj->user_pin_count == 0) { | ||
4338 | obj->pin_filp = NULL; | ||
4339 | i915_gem_object_ggtt_unpin(obj); | ||
4340 | } | ||
4341 | |||
4342 | out: | ||
4343 | drm_gem_object_unreference(&obj->base); | ||
4344 | unlock: | ||
4345 | mutex_unlock(&dev->struct_mutex); | ||
4346 | return ret; | ||
4347 | } | ||
4348 | |||
4349 | int | ||
4350 | i915_gem_busy_ioctl(struct drm_device *dev, void *data, | 4275 | i915_gem_busy_ioctl(struct drm_device *dev, void *data, |
4351 | struct drm_file *file) | 4276 | struct drm_file *file) |
4352 | { | 4277 | { |
@@ -4372,9 +4297,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
4372 | ret = i915_gem_object_flush_active(obj); | 4297 | ret = i915_gem_object_flush_active(obj); |
4373 | 4298 | ||
4374 | args->busy = obj->active; | 4299 | args->busy = obj->active; |
4375 | if (obj->ring) { | 4300 | if (obj->last_read_req) { |
4301 | struct intel_engine_cs *ring; | ||
4376 | BUILD_BUG_ON(I915_NUM_RINGS > 16); | 4302 | BUILD_BUG_ON(I915_NUM_RINGS > 16); |
4377 | args->busy |= intel_ring_flag(obj->ring) << 16; | 4303 | ring = i915_gem_request_get_ring(obj->last_read_req); |
4304 | args->busy |= intel_ring_flag(ring) << 16; | ||
4378 | } | 4305 | } |
4379 | 4306 | ||
4380 | drm_gem_object_unreference(&obj->base); | 4307 | drm_gem_object_unreference(&obj->base); |
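The busy ioctl ABI is unchanged by the request conversion: bit 0 still reports activity and the upper 16 bits still carry the ring flag (hence the BUILD_BUG_ON). A hypothetical userspace-side decode, assuming intel_ring_flag() remains (1 << ring->id) as elsewhere in this era:

	#include <xf86drm.h>
	#include <drm/i915_drm.h>

	struct drm_i915_gem_busy args = { .handle = handle };

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &args) == 0) {
		int busy = args.busy & 1;		/* object still active? */
		unsigned rings = args.busy >> 16;	/* bitmask of rings using the bo */
	}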
@@ -4454,6 +4381,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, | |||
4454 | INIT_LIST_HEAD(&obj->ring_list); | 4381 | INIT_LIST_HEAD(&obj->ring_list); |
4455 | INIT_LIST_HEAD(&obj->obj_exec_link); | 4382 | INIT_LIST_HEAD(&obj->obj_exec_link); |
4456 | INIT_LIST_HEAD(&obj->vma_list); | 4383 | INIT_LIST_HEAD(&obj->vma_list); |
4384 | INIT_LIST_HEAD(&obj->batch_pool_list); | ||
4457 | 4385 | ||
4458 | obj->ops = ops; | 4386 | obj->ops = ops; |
4459 | 4387 | ||
@@ -4609,12 +4537,13 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) | |||
4609 | intel_runtime_pm_put(dev_priv); | 4537 | intel_runtime_pm_put(dev_priv); |
4610 | } | 4538 | } |
4611 | 4539 | ||
4612 | struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, | 4540 | struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj, |
4613 | struct i915_address_space *vm) | 4541 | struct i915_address_space *vm, |
4542 | const struct i915_ggtt_view *view) | ||
4614 | { | 4543 | { |
4615 | struct i915_vma *vma; | 4544 | struct i915_vma *vma; |
4616 | list_for_each_entry(vma, &obj->vma_list, vma_link) | 4545 | list_for_each_entry(vma, &obj->vma_list, vma_link) |
4617 | if (vma->vm == vm) | 4546 | if (vma->vm == vm && vma->ggtt_view.type == view->type) |
4618 | return vma; | 4547 | return vma; |
4619 | 4548 | ||
4620 | return NULL; | 4549 | return NULL; |
@@ -4674,6 +4603,11 @@ i915_gem_suspend(struct drm_device *dev) | |||
4674 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); | 4603 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); |
4675 | flush_delayed_work(&dev_priv->mm.idle_work); | 4604 | flush_delayed_work(&dev_priv->mm.idle_work); |
4676 | 4605 | ||
4606 | /* Assert that we successfully flushed all the work and | ||
4607 | * reset the GPU back to its idle, low power state. | ||
4608 | */ | ||
4609 | WARN_ON(dev_priv->mm.busy); | ||
4610 | |||
4677 | return 0; | 4611 | return 0; |
4678 | 4612 | ||
4679 | err: | 4613 | err: |
@@ -4785,14 +4719,6 @@ int i915_gem_init_rings(struct drm_device *dev) | |||
4785 | struct drm_i915_private *dev_priv = dev->dev_private; | 4719 | struct drm_i915_private *dev_priv = dev->dev_private; |
4786 | int ret; | 4720 | int ret; |
4787 | 4721 | ||
4788 | /* | ||
4789 | * At least 830 can leave some of the unused rings | ||
4790 | * "active" (ie. head != tail) after resume which | ||
4791 | * will prevent c3 entry. Makes sure all unused rings | ||
4792 | * are totally idle. | ||
4793 | */ | ||
4794 | init_unused_rings(dev); | ||
4795 | |||
4796 | ret = intel_init_render_ring_buffer(dev); | 4722 | ret = intel_init_render_ring_buffer(dev); |
4797 | if (ret) | 4723 | if (ret) |
4798 | return ret; | 4724 | return ret; |
@@ -4845,6 +4771,7 @@ int | |||
4845 | i915_gem_init_hw(struct drm_device *dev) | 4771 | i915_gem_init_hw(struct drm_device *dev) |
4846 | { | 4772 | { |
4847 | struct drm_i915_private *dev_priv = dev->dev_private; | 4773 | struct drm_i915_private *dev_priv = dev->dev_private; |
4774 | struct intel_engine_cs *ring; | ||
4848 | int ret, i; | 4775 | int ret, i; |
4849 | 4776 | ||
4850 | if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) | 4777 | if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) |
@@ -4871,9 +4798,19 @@ i915_gem_init_hw(struct drm_device *dev) | |||
4871 | 4798 | ||
4872 | i915_gem_init_swizzling(dev); | 4799 | i915_gem_init_swizzling(dev); |
4873 | 4800 | ||
4874 | ret = dev_priv->gt.init_rings(dev); | 4801 | /* |
4875 | if (ret) | 4802 | * At least 830 can leave some of the unused rings |
4876 | return ret; | 4803 | * "active" (ie. head != tail) after resume which |
4804 | * will prevent c3 entry. Make sure all unused rings | ||
4805 | * are totally idle. | ||
4806 | */ | ||
4807 | init_unused_rings(dev); | ||
4808 | |||
4809 | for_each_ring(ring, dev_priv, i) { | ||
4810 | ret = ring->init_hw(ring); | ||
4811 | if (ret) | ||
4812 | return ret; | ||
4813 | } | ||
4877 | 4814 | ||
4878 | for (i = 0; i < NUM_L3_SLICES(dev); i++) | 4815 | for (i = 0; i < NUM_L3_SLICES(dev); i++) |
4879 | i915_gem_l3_remap(&dev_priv->ring[RCS], i); | 4816 | i915_gem_l3_remap(&dev_priv->ring[RCS], i); |
@@ -4933,18 +4870,18 @@ int i915_gem_init(struct drm_device *dev) | |||
4933 | } | 4870 | } |
4934 | 4871 | ||
4935 | ret = i915_gem_init_userptr(dev); | 4872 | ret = i915_gem_init_userptr(dev); |
4936 | if (ret) { | 4873 | if (ret) |
4937 | mutex_unlock(&dev->struct_mutex); | 4874 | goto out_unlock; |
4938 | return ret; | ||
4939 | } | ||
4940 | 4875 | ||
4941 | i915_gem_init_global_gtt(dev); | 4876 | i915_gem_init_global_gtt(dev); |
4942 | 4877 | ||
4943 | ret = i915_gem_context_init(dev); | 4878 | ret = i915_gem_context_init(dev); |
4944 | if (ret) { | 4879 | if (ret) |
4945 | mutex_unlock(&dev->struct_mutex); | 4880 | goto out_unlock; |
4946 | return ret; | 4881 | |
4947 | } | 4882 | ret = dev_priv->gt.init_rings(dev); |
4883 | if (ret) | ||
4884 | goto out_unlock; | ||
4948 | 4885 | ||
4949 | ret = i915_gem_init_hw(dev); | 4886 | ret = i915_gem_init_hw(dev); |
4950 | if (ret == -EIO) { | 4887 | if (ret == -EIO) { |
@@ -4956,6 +4893,8 @@ int i915_gem_init(struct drm_device *dev) | |||
4956 | atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter); | 4893 | atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter); |
4957 | ret = 0; | 4894 | ret = 0; |
4958 | } | 4895 | } |
4896 | |||
4897 | out_unlock: | ||
4959 | mutex_unlock(&dev->struct_mutex); | 4898 | mutex_unlock(&dev->struct_mutex); |
4960 | 4899 | ||
4961 | return ret; | 4900 | return ret; |
@@ -5056,6 +4995,8 @@ i915_gem_load(struct drm_device *dev) | |||
5056 | dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; | 4995 | dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; |
5057 | register_oom_notifier(&dev_priv->mm.oom_notifier); | 4996 | register_oom_notifier(&dev_priv->mm.oom_notifier); |
5058 | 4997 | ||
4998 | i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool); | ||
4999 | |||
5059 | mutex_init(&dev_priv->fb_tracking.lock); | 5000 | mutex_init(&dev_priv->fb_tracking.lock); |
5060 | } | 5001 | } |
5061 | 5002 | ||
@@ -5216,8 +5157,9 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) | |||
5216 | } | 5157 | } |
5217 | 5158 | ||
5218 | /* All the new VM stuff */ | 5159 | /* All the new VM stuff */ |
5219 | unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, | 5160 | unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o, |
5220 | struct i915_address_space *vm) | 5161 | struct i915_address_space *vm, |
5162 | enum i915_ggtt_view_type view) | ||
5221 | { | 5163 | { |
5222 | struct drm_i915_private *dev_priv = o->base.dev->dev_private; | 5164 | struct drm_i915_private *dev_priv = o->base.dev->dev_private; |
5223 | struct i915_vma *vma; | 5165 | struct i915_vma *vma; |
@@ -5225,7 +5167,7 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, | |||
5225 | WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); | 5167 | WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); |
5226 | 5168 | ||
5227 | list_for_each_entry(vma, &o->vma_list, vma_link) { | 5169 | list_for_each_entry(vma, &o->vma_list, vma_link) { |
5228 | if (vma->vm == vm) | 5170 | if (vma->vm == vm && vma->ggtt_view.type == view) |
5229 | return vma->node.start; | 5171 | return vma->node.start; |
5230 | 5172 | ||
5231 | } | 5173 | } |
@@ -5234,13 +5176,16 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, | |||
5234 | return -1; | 5176 | return -1; |
5235 | } | 5177 | } |
5236 | 5178 | ||
5237 | bool i915_gem_obj_bound(struct drm_i915_gem_object *o, | 5179 | bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o, |
5238 | struct i915_address_space *vm) | 5180 | struct i915_address_space *vm, |
5181 | enum i915_ggtt_view_type view) | ||
5239 | { | 5182 | { |
5240 | struct i915_vma *vma; | 5183 | struct i915_vma *vma; |
5241 | 5184 | ||
5242 | list_for_each_entry(vma, &o->vma_list, vma_link) | 5185 | list_for_each_entry(vma, &o->vma_list, vma_link) |
5243 | if (vma->vm == vm && drm_mm_node_allocated(&vma->node)) | 5186 | if (vma->vm == vm && |
5187 | vma->ggtt_view.type == view && | ||
5188 | drm_mm_node_allocated(&vma->node)) | ||
5244 | return true; | 5189 | return true; |
5245 | 5190 | ||
5246 | return false; | 5191 | return false; |
@@ -5372,11 +5317,13 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) | |||
5372 | 5317 | ||
5373 | struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) | 5318 | struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) |
5374 | { | 5319 | { |
5320 | struct i915_address_space *ggtt = i915_obj_to_ggtt(obj); | ||
5375 | struct i915_vma *vma; | 5321 | struct i915_vma *vma; |
5376 | 5322 | ||
5377 | vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link); | 5323 | list_for_each_entry(vma, &obj->vma_list, vma_link) |
5378 | if (vma->vm != i915_obj_to_ggtt(obj)) | 5324 | if (vma->vm == ggtt && |
5379 | return NULL; | 5325 | vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) |
5326 | return vma; | ||
5380 | 5327 | ||
5381 | return vma; | 5328 | return NULL; |
5382 | } | 5329 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c new file mode 100644 index 000000000000..c690170a1c4f --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c | |||
@@ -0,0 +1,137 @@ | |||
1 | /* | ||
2 | * Copyright © 2014 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
21 | * IN THE SOFTWARE. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #include "i915_drv.h" | ||
26 | |||
27 | /** | ||
28 | * DOC: batch pool | ||
29 | * | ||
30 | * In order to submit batch buffers as 'secure', the software command parser | ||
31 | * must ensure that a batch buffer cannot be modified after parsing. It does | ||
32 | * this by copying the user provided batch buffer contents to a kernel owned | ||
33 | * buffer from which the hardware will actually execute, and by carefully | ||
34 | * managing the address space bindings for such buffers. | ||
35 | * | ||
36 | * The batch pool framework provides a mechanism for the driver to manage a | ||
37 | * set of scratch buffers to use for this purpose. The framework can be | ||
38 | * extended to support other use cases should they arise. | ||
39 | */ | ||
40 | |||
41 | /** | ||
42 | * i915_gem_batch_pool_init() - initialize a batch buffer pool | ||
43 | * @dev: the drm device | ||
44 | * @pool: the batch buffer pool | ||
45 | */ | ||
46 | void i915_gem_batch_pool_init(struct drm_device *dev, | ||
47 | struct i915_gem_batch_pool *pool) | ||
48 | { | ||
49 | pool->dev = dev; | ||
50 | INIT_LIST_HEAD(&pool->cache_list); | ||
51 | } | ||
52 | |||
53 | /** | ||
54 | * i915_gem_batch_pool_fini() - clean up a batch buffer pool | ||
55 | * @pool: the pool to clean up | ||
56 | * | ||
57 | * Note: Callers must hold the struct_mutex. | ||
58 | */ | ||
59 | void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool) | ||
60 | { | ||
61 | WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex)); | ||
62 | |||
63 | while (!list_empty(&pool->cache_list)) { | ||
64 | struct drm_i915_gem_object *obj = | ||
65 | list_first_entry(&pool->cache_list, | ||
66 | struct drm_i915_gem_object, | ||
67 | batch_pool_list); | ||
68 | |||
69 | WARN_ON(obj->active); | ||
70 | |||
71 | list_del_init(&obj->batch_pool_list); | ||
72 | drm_gem_object_unreference(&obj->base); | ||
73 | } | ||
74 | } | ||
75 | |||
76 | /** | ||
77 | * i915_gem_batch_pool_get() - select a buffer from the pool | ||
78 | * @pool: the batch buffer pool | ||
79 | * @size: the minimum desired size of the returned buffer | ||
80 | * | ||
81 | * Finds or allocates a batch buffer in the pool with at least the requested | ||
82 | * size. The caller is responsible for any domain, active/inactive, or | ||
83 | * purgeability management for the returned buffer. | ||
84 | * | ||
85 | * Note: Callers must hold the struct_mutex | ||
86 | * | ||
87 | * Return: the selected batch buffer object | ||
88 | */ | ||
89 | struct drm_i915_gem_object * | ||
90 | i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, | ||
91 | size_t size) | ||
92 | { | ||
93 | struct drm_i915_gem_object *obj = NULL; | ||
94 | struct drm_i915_gem_object *tmp, *next; | ||
95 | |||
96 | WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex)); | ||
97 | |||
98 | list_for_each_entry_safe(tmp, next, | ||
99 | &pool->cache_list, batch_pool_list) { | ||
100 | |||
101 | if (tmp->active) | ||
102 | continue; | ||
103 | |||
104 | /* While we're looping, do some clean up */ | ||
105 | if (tmp->madv == __I915_MADV_PURGED) { | ||
106 | list_del(&tmp->batch_pool_list); | ||
107 | drm_gem_object_unreference(&tmp->base); | ||
108 | continue; | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * Select a buffer that is at least as big as needed | ||
113 | * but not 'too much' bigger. A better way to do this | ||
114 | * might be to bucket the pool objects based on size. | ||
115 | */ | ||
116 | if (tmp->base.size >= size && | ||
117 | tmp->base.size <= (2 * size)) { | ||
118 | obj = tmp; | ||
119 | break; | ||
120 | } | ||
121 | } | ||
122 | |||
123 | if (!obj) { | ||
124 | obj = i915_gem_alloc_object(pool->dev, size); | ||
125 | if (!obj) | ||
126 | return ERR_PTR(-ENOMEM); | ||
127 | |||
128 | list_add_tail(&obj->batch_pool_list, &pool->cache_list); | ||
129 | } else { | ||
130 | /* Keep list in LRU order */ | ||
131 | list_move_tail(&obj->batch_pool_list, &pool->cache_list); | ||
132 | } | ||
133 | |||
134 | obj->madv = I915_MADV_WILLNEED; | ||
135 | |||
136 | return obj; | ||
137 | } | ||
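A minimal usage sketch of the pool API added above, mirroring how the execbuffer parser consumes it later in this patch; error handling is trimmed and the dev_priv->mm.batch_pool instance initialized in i915_gem_load() is assumed:

	struct drm_i915_gem_object *shadow;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	shadow = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool, size);
	if (IS_ERR(shadow))
		return PTR_ERR(shadow);

	/* ... copy the user batch into "shadow" and submit it ... */

	shadow->madv = I915_MADV_DONTNEED;	/* ready for reuse or purge */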
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index d011ec82ef1e..9b23fb1f5bf6 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
@@ -408,14 +408,25 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv) | |||
408 | 408 | ||
409 | BUG_ON(!dev_priv->ring[RCS].default_context); | 409 | BUG_ON(!dev_priv->ring[RCS].default_context); |
410 | 410 | ||
411 | if (i915.enable_execlists) | 411 | if (i915.enable_execlists) { |
412 | return 0; | 412 | for_each_ring(ring, dev_priv, i) { |
413 | if (ring->init_context) { | ||
414 | ret = ring->init_context(ring, | ||
415 | ring->default_context); | ||
416 | if (ret) { | ||
417 | DRM_ERROR("ring init context: %d\n", | ||
418 | ret); | ||
419 | return ret; | ||
420 | } | ||
421 | } | ||
422 | } | ||
413 | 423 | ||
414 | for_each_ring(ring, dev_priv, i) { | 424 | } else |
415 | ret = i915_switch_context(ring, ring->default_context); | 425 | for_each_ring(ring, dev_priv, i) { |
416 | if (ret) | 426 | ret = i915_switch_context(ring, ring->default_context); |
417 | return ret; | 427 | if (ret) |
418 | } | 428 | return ret; |
429 | } | ||
419 | 430 | ||
420 | return 0; | 431 | return 0; |
421 | } | 432 | } |
@@ -611,9 +622,14 @@ static int do_switch(struct intel_engine_cs *ring, | |||
611 | goto unpin_out; | 622 | goto unpin_out; |
612 | 623 | ||
613 | vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state); | 624 | vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state); |
614 | if (!(vma->bound & GLOBAL_BIND)) | 625 | if (!(vma->bound & GLOBAL_BIND)) { |
615 | vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, | 626 | ret = i915_vma_bind(vma, |
616 | GLOBAL_BIND); | 627 | to->legacy_hw_ctx.rcs_state->cache_level, |
628 | GLOBAL_BIND); | ||
629 | /* This shouldn't ever fail. */ | ||
630 | if (WARN_ONCE(ret, "GGTT context bind failed!")) | ||
631 | goto unpin_out; | ||
632 | } | ||
617 | 633 | ||
618 | if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) | 634 | if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) |
619 | hw_flags |= MI_RESTORE_INHIBIT; | 635 | hw_flags |= MI_RESTORE_INHIBIT; |
@@ -651,7 +667,8 @@ static int do_switch(struct intel_engine_cs *ring, | |||
651 | * swapped, but there is no way to do that yet. | 667 | * swapped, but there is no way to do that yet. |
652 | */ | 668 | */ |
653 | from->legacy_hw_ctx.rcs_state->dirty = 1; | 669 | from->legacy_hw_ctx.rcs_state->dirty = 1; |
654 | BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring); | 670 | BUG_ON(i915_gem_request_get_ring( |
671 | from->legacy_hw_ctx.rcs_state->last_read_req) != ring); | ||
655 | 672 | ||
656 | /* obj is kept alive until the next request by its active ref */ | 673 | /* obj is kept alive until the next request by its active ref */ |
657 | i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state); | 674 | i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state); |
@@ -671,10 +688,6 @@ done: | |||
671 | if (ret) | 688 | if (ret) |
672 | DRM_ERROR("ring init context: %d\n", ret); | 689 | DRM_ERROR("ring init context: %d\n", ret); |
673 | } | 690 | } |
674 | |||
675 | ret = i915_gem_render_state_init(ring); | ||
676 | if (ret) | ||
677 | DRM_ERROR("init render state: %d\n", ret); | ||
678 | } | 691 | } |
679 | 692 | ||
680 | return 0; | 693 | return 0; |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 11738316394a..1d6e0929ab83 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #define __EXEC_OBJECT_HAS_FENCE (1<<30) | 37 | #define __EXEC_OBJECT_HAS_FENCE (1<<30) |
38 | #define __EXEC_OBJECT_NEEDS_MAP (1<<29) | 38 | #define __EXEC_OBJECT_NEEDS_MAP (1<<29) |
39 | #define __EXEC_OBJECT_NEEDS_BIAS (1<<28) | 39 | #define __EXEC_OBJECT_NEEDS_BIAS (1<<28) |
40 | #define __EXEC_OBJECT_PURGEABLE (1<<27) | ||
40 | 41 | ||
41 | #define BATCH_OFFSET_BIAS (256*1024) | 42 | #define BATCH_OFFSET_BIAS (256*1024) |
42 | 43 | ||
@@ -223,7 +224,12 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma) | |||
223 | if (entry->flags & __EXEC_OBJECT_HAS_PIN) | 224 | if (entry->flags & __EXEC_OBJECT_HAS_PIN) |
224 | vma->pin_count--; | 225 | vma->pin_count--; |
225 | 226 | ||
226 | entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN); | 227 | if (entry->flags & __EXEC_OBJECT_PURGEABLE) |
228 | obj->madv = I915_MADV_DONTNEED; | ||
229 | |||
230 | entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | | ||
231 | __EXEC_OBJECT_HAS_PIN | | ||
232 | __EXEC_OBJECT_PURGEABLE); | ||
227 | } | 233 | } |
228 | 234 | ||
229 | static void eb_destroy(struct eb_vmas *eb) | 235 | static void eb_destroy(struct eb_vmas *eb) |
@@ -357,9 +363,12 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, | |||
357 | * through the ppgtt for non_secure batchbuffers. */ | 363 | * through the ppgtt for non_secure batchbuffers. */ |
358 | if (unlikely(IS_GEN6(dev) && | 364 | if (unlikely(IS_GEN6(dev) && |
359 | reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && | 365 | reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && |
360 | !(target_vma->bound & GLOBAL_BIND))) | 366 | !(target_vma->bound & GLOBAL_BIND))) { |
361 | target_vma->bind_vma(target_vma, target_i915_obj->cache_level, | 367 | ret = i915_vma_bind(target_vma, target_i915_obj->cache_level, |
362 | GLOBAL_BIND); | 368 | GLOBAL_BIND); |
369 | if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!")) | ||
370 | return ret; | ||
371 | } | ||
363 | 372 | ||
364 | /* Validate that the target is in a valid r/w GPU domain */ | 373 | /* Validate that the target is in a valid r/w GPU domain */ |
365 | if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { | 374 | if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { |
@@ -943,7 +952,7 @@ void | |||
943 | i915_gem_execbuffer_move_to_active(struct list_head *vmas, | 952 | i915_gem_execbuffer_move_to_active(struct list_head *vmas, |
944 | struct intel_engine_cs *ring) | 953 | struct intel_engine_cs *ring) |
945 | { | 954 | { |
946 | u32 seqno = intel_ring_get_seqno(ring); | 955 | struct drm_i915_gem_request *req = intel_ring_get_request(ring); |
947 | struct i915_vma *vma; | 956 | struct i915_vma *vma; |
948 | 957 | ||
949 | list_for_each_entry(vma, vmas, exec_list) { | 958 | list_for_each_entry(vma, vmas, exec_list) { |
@@ -960,7 +969,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, | |||
960 | i915_vma_move_to_active(vma, ring); | 969 | i915_vma_move_to_active(vma, ring); |
961 | if (obj->base.write_domain) { | 970 | if (obj->base.write_domain) { |
962 | obj->dirty = 1; | 971 | obj->dirty = 1; |
963 | obj->last_write_seqno = seqno; | 972 | i915_gem_request_assign(&obj->last_write_req, req); |
964 | 973 | ||
965 | intel_fb_obj_invalidate(obj, ring); | 974 | intel_fb_obj_invalidate(obj, ring); |
966 | 975 | ||
@@ -968,7 +977,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, | |||
968 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; | 977 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; |
969 | } | 978 | } |
970 | if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { | 979 | if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { |
971 | obj->last_fenced_seqno = seqno; | 980 | i915_gem_request_assign(&obj->last_fenced_req, req); |
972 | if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { | 981 | if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { |
973 | struct drm_i915_private *dev_priv = to_i915(ring->dev); | 982 | struct drm_i915_private *dev_priv = to_i915(ring->dev); |
974 | list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list, | 983 | list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list, |
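i915_gem_request_assign() used above is the series' reference-swapping helper; a sketch of its expected semantics (the actual definition lives in i915_drv.h, outside this diff):

	static inline void
	i915_gem_request_assign(struct drm_i915_gem_request **pdst,
				struct drm_i915_gem_request *src)
	{
		if (src)
			i915_gem_request_reference(src);
		if (*pdst)
			i915_gem_request_unreference(*pdst);
		*pdst = src;
	}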
@@ -990,7 +999,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev, | |||
990 | ring->gpu_caches_dirty = true; | 999 | ring->gpu_caches_dirty = true; |
991 | 1000 | ||
992 | /* Add a breadcrumb for the completion of the batch buffer */ | 1001 | /* Add a breadcrumb for the completion of the batch buffer */ |
993 | (void)__i915_add_request(ring, file, obj, NULL); | 1002 | (void)__i915_add_request(ring, file, obj); |
994 | } | 1003 | } |
995 | 1004 | ||
996 | static int | 1005 | static int |
@@ -1060,6 +1069,65 @@ i915_emit_box(struct intel_engine_cs *ring, | |||
1060 | return 0; | 1069 | return 0; |
1061 | } | 1070 | } |
1062 | 1071 | ||
1072 | static struct drm_i915_gem_object* | ||
1073 | i915_gem_execbuffer_parse(struct intel_engine_cs *ring, | ||
1074 | struct drm_i915_gem_exec_object2 *shadow_exec_entry, | ||
1075 | struct eb_vmas *eb, | ||
1076 | struct drm_i915_gem_object *batch_obj, | ||
1077 | u32 batch_start_offset, | ||
1078 | u32 batch_len, | ||
1079 | bool is_master, | ||
1080 | u32 *flags) | ||
1081 | { | ||
1082 | struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev); | ||
1083 | struct drm_i915_gem_object *shadow_batch_obj; | ||
1084 | int ret; | ||
1085 | |||
1086 | shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool, | ||
1087 | batch_obj->base.size); | ||
1088 | if (IS_ERR(shadow_batch_obj)) | ||
1089 | return shadow_batch_obj; | ||
1090 | |||
1091 | ret = i915_parse_cmds(ring, | ||
1092 | batch_obj, | ||
1093 | shadow_batch_obj, | ||
1094 | batch_start_offset, | ||
1095 | batch_len, | ||
1096 | is_master); | ||
1097 | if (ret) { | ||
1098 | if (ret == -EACCES) | ||
1099 | return batch_obj; | ||
1100 | } else { | ||
1101 | struct i915_vma *vma; | ||
1102 | |||
1103 | memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry)); | ||
1104 | |||
1105 | vma = i915_gem_obj_to_ggtt(shadow_batch_obj); | ||
1106 | vma->exec_entry = shadow_exec_entry; | ||
1107 | vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE; | ||
1108 | drm_gem_object_reference(&shadow_batch_obj->base); | ||
1109 | list_add_tail(&vma->exec_list, &eb->vmas); | ||
1110 | |||
1111 | shadow_batch_obj->base.pending_read_domains = | ||
1112 | batch_obj->base.pending_read_domains; | ||
1113 | |||
1114 | /* | ||
1115 | * Set the DISPATCH_SECURE bit to remove the NON_SECURE | ||
1116 | * bit from MI_BATCH_BUFFER_START commands issued in the | ||
1117 | * dispatch_execbuffer implementations. We specifically | ||
1118 | * don't want that set when the command parser is | ||
1119 | * enabled. | ||
1120 | * | ||
1121 | * FIXME: with aliasing ppgtt, buffers that should only | ||
1122 | * be in ggtt still end up in the aliasing ppgtt. remove | ||
1123 | * this check when that is fixed. | ||
1124 | */ | ||
1125 | if (USES_FULL_PPGTT(dev)) | ||
1126 | *flags |= I915_DISPATCH_SECURE; | ||
1127 | } | ||
1128 | |||
1129 | return ret ? ERR_PTR(ret) : shadow_batch_obj; | ||
1130 | } | ||
1063 | 1131 | ||
1064 | int | 1132 | int |
1065 | i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, | 1133 | i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, |
@@ -1208,7 +1276,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, | |||
1208 | return ret; | 1276 | return ret; |
1209 | } | 1277 | } |
1210 | 1278 | ||
1211 | trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); | 1279 | trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags); |
1212 | 1280 | ||
1213 | i915_gem_execbuffer_move_to_active(vmas, ring); | 1281 | i915_gem_execbuffer_move_to_active(vmas, ring); |
1214 | i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); | 1282 | i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); |
@@ -1277,6 +1345,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1277 | struct drm_i915_private *dev_priv = dev->dev_private; | 1345 | struct drm_i915_private *dev_priv = dev->dev_private; |
1278 | struct eb_vmas *eb; | 1346 | struct eb_vmas *eb; |
1279 | struct drm_i915_gem_object *batch_obj; | 1347 | struct drm_i915_gem_object *batch_obj; |
1348 | struct drm_i915_gem_exec_object2 shadow_exec_entry; | ||
1280 | struct intel_engine_cs *ring; | 1349 | struct intel_engine_cs *ring; |
1281 | struct intel_context *ctx; | 1350 | struct intel_context *ctx; |
1282 | struct i915_address_space *vm; | 1351 | struct i915_address_space *vm; |
@@ -1393,28 +1462,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
1393 | ret = -EINVAL; | 1462 | ret = -EINVAL; |
1394 | goto err; | 1463 | goto err; |
1395 | } | 1464 | } |
1396 | batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; | ||
1397 | 1465 | ||
1398 | if (i915_needs_cmd_parser(ring)) { | 1466 | if (i915_needs_cmd_parser(ring)) { |
1399 | ret = i915_parse_cmds(ring, | 1467 | batch_obj = i915_gem_execbuffer_parse(ring, |
1400 | batch_obj, | 1468 | &shadow_exec_entry, |
1401 | args->batch_start_offset, | 1469 | eb, |
1402 | file->is_master); | 1470 | batch_obj, |
1403 | if (ret) { | 1471 | args->batch_start_offset, |
1404 | if (ret != -EACCES) | 1472 | args->batch_len, |
1405 | goto err; | 1473 | file->is_master, |
1406 | } else { | 1474 | &flags); |
1407 | /* | 1475 | if (IS_ERR(batch_obj)) { |
1408 | * XXX: Actually do this when enabling batch copy... | 1476 | ret = PTR_ERR(batch_obj); |
1409 | * | 1477 | goto err; |
1410 | * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit | ||
1411 | * from MI_BATCH_BUFFER_START commands issued in the | ||
1412 | * dispatch_execbuffer implementations. We specifically don't | ||
1413 | * want that set when the command parser is enabled. | ||
1414 | */ | ||
1415 | } | 1478 | } |
1416 | } | 1479 | } |
1417 | 1480 | ||
1481 | batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; | ||
1482 | |||
1418 | /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure | 1483 | /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure |
1419 | * batch" bit. Hence we need to pin secure batches into the global gtt. | 1484 | * batch" bit. Hence we need to pin secure batches into the global gtt. |
1420 | * hsw should have this fixed, but bdw mucks it up again. */ | 1485 | * hsw should have this fixed, but bdw mucks it up again. */ |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 171f6eafdeee..746f77fb57a3 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -30,6 +30,68 @@ | |||
30 | #include "i915_trace.h" | 30 | #include "i915_trace.h" |
31 | #include "intel_drv.h" | 31 | #include "intel_drv.h" |
32 | 32 | ||
33 | /** | ||
34 | * DOC: Global GTT views | ||
35 | * | ||
36 | * Background and previous state | ||
37 | * | ||
38 | * Historically objects could exist (be bound) in global GTT space only as | ||
39 | * singular instances with a view representing all of the object's backing pages | ||
40 | * in a linear fashion. This view will be called a normal view. | ||
41 | * | ||
42 | * To support multiple views of the same object, where the number of mapped | ||
43 | * pages is not equal to the size of the backing store, or where the layout | ||
44 | * of the pages is not linear, the concept of a GGTT view was added. | ||
45 | * | ||
46 | * One example of an alternative view is a stereo display driven by a single | ||
47 | * image. In this case we would have a framebuffer looking like this | ||
48 | * (2x2 pages): | ||
49 | * | ||
50 | * 12 | ||
51 | * 34 | ||
52 | * | ||
53 | * The above would represent a normal GGTT view as normally mapped for GPU or | ||
54 | * CPU rendering. In contrast, the view fed to the display engine would be an | ||
55 | * alternative one, which could look something like this: | ||
56 | * | ||
57 | * 1212 | ||
58 | * 3434 | ||
59 | * | ||
60 | * In this example both the size and the layout of pages in the alternative | ||
61 | * view differ from those of the normal view. | ||
62 | * | ||
63 | * Implementation and usage | ||
64 | * | ||
65 | * GGTT views are implemented using VMAs and are distinguished via enum | ||
66 | * i915_ggtt_view_type and struct i915_ggtt_view. | ||
67 | * | ||
68 | * A new flavour of core GEM functions which work with GGTT bound objects were | ||
69 | * added with the _view suffix. They take the struct i915_ggtt_view parameter | ||
70 | * encapsulating all metadata required to implement a view. | ||
71 | * | ||
72 | * As a helper for callers which are only interested in the normal view, | ||
73 | * a globally const i915_ggtt_view_normal singleton instance exists. All old | ||
74 | * core GEM API functions, the ones not taking the view parameter, operate | ||
75 | * on, or with, the normal GGTT view. | ||
76 | * | ||
77 | * Code wanting to add or use a new GGTT view needs to: | ||
78 | * | ||
79 | * 1. Add a new enum with a suitable name. | ||
80 | * 2. Extend the metadata in the i915_ggtt_view structure if required. | ||
81 | * 3. Add support to i915_get_vma_pages(). | ||
82 | * | ||
83 | * New views are required to build a scatter-gather table from within the | ||
84 | * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and | ||
85 | * exists for the lifetime of a VMA. | ||
86 | * | ||
87 | * The core API is designed to have copy semantics, which means that a passed-in | ||
88 | * struct i915_ggtt_view does not need to be persistent (left around after | ||
89 | * calling the core API functions). | ||
90 | * | ||
91 | */ | ||
92 | |||
93 | const struct i915_ggtt_view i915_ggtt_view_normal; | ||
94 | |||
33 | static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv); | 95 | static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv); |
34 | static void chv_setup_private_ppat(struct drm_i915_private *dev_priv); | 96 | static void chv_setup_private_ppat(struct drm_i915_private *dev_priv); |
35 | 97 | ||
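Purely illustrative of the three-step recipe in the DOC comment above, with a hypothetical rotated view that is not part of this patch:

	/* Step 1: add the new enum value (hypothetical). */
	enum i915_ggtt_view_type {
		I915_GGTT_VIEW_NORMAL = 0,
		I915_GGTT_VIEW_ROTATED,		/* hypothetical */
	};

	/* Step 3: teach i915_get_vma_pages() to build the sg_table. */
	if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		vma->ggtt_view.pages = make_rotated_sg(vma->obj);	/* hypothetical helper */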
@@ -40,8 +102,6 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) | |||
40 | 102 | ||
41 | has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6; | 103 | has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6; |
42 | has_full_ppgtt = INTEL_INFO(dev)->gen >= 7; | 104 | has_full_ppgtt = INTEL_INFO(dev)->gen >= 7; |
43 | if (IS_GEN8(dev)) | ||
44 | has_full_ppgtt = false; /* XXX why? */ | ||
45 | 105 | ||
46 | /* | 106 | /* |
47 | * We don't allow disabling PPGTT for gen9+ as it's a requirement for | 107 | * We don't allow disabling PPGTT for gen9+ as it's a requirement for |
@@ -72,7 +132,10 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) | |||
72 | return 0; | 132 | return 0; |
73 | } | 133 | } |
74 | 134 | ||
75 | return has_aliasing_ppgtt ? 1 : 0; | 135 | if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists) |
136 | return 2; | ||
137 | else | ||
138 | return has_aliasing_ppgtt ? 1 : 0; | ||
76 | } | 139 | } |
77 | 140 | ||
78 | 141 | ||
@@ -132,7 +195,7 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, | |||
132 | pte |= GEN6_PTE_UNCACHED; | 195 | pte |= GEN6_PTE_UNCACHED; |
133 | break; | 196 | break; |
134 | default: | 197 | default: |
135 | WARN_ON(1); | 198 | MISSING_CASE(level); |
136 | } | 199 | } |
137 | 200 | ||
138 | return pte; | 201 | return pte; |
@@ -156,7 +219,7 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, | |||
156 | pte |= GEN6_PTE_UNCACHED; | 219 | pte |= GEN6_PTE_UNCACHED; |
157 | break; | 220 | break; |
158 | default: | 221 | default: |
159 | WARN_ON(1); | 222 | MISSING_CASE(level); |
160 | } | 223 | } |
161 | 224 | ||
162 | return pte; | 225 | return pte; |
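For reference, the MISSING_CASE() macro substituted in these switches is assumed to be defined along these lines in i915_drv.h (not shown in this diff); unlike the bare WARN_ON(1) it names the offending value and function:

	#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
				     (long)(x), __func__)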
@@ -1102,10 +1165,8 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) | |||
1102 | 1165 | ||
1103 | if (INTEL_INFO(dev)->gen < 8) | 1166 | if (INTEL_INFO(dev)->gen < 8) |
1104 | return gen6_ppgtt_init(ppgtt); | 1167 | return gen6_ppgtt_init(ppgtt); |
1105 | else if (IS_GEN8(dev) || IS_GEN9(dev)) | ||
1106 | return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total); | ||
1107 | else | 1168 | else |
1108 | BUG(); | 1169 | return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total); |
1109 | } | 1170 | } |
1110 | int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) | 1171 | int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) |
1111 | { | 1172 | { |
@@ -1146,7 +1207,7 @@ int i915_ppgtt_init_hw(struct drm_device *dev) | |||
1146 | else if (INTEL_INFO(dev)->gen >= 8) | 1207 | else if (INTEL_INFO(dev)->gen >= 8) |
1147 | gen8_ppgtt_enable(dev); | 1208 | gen8_ppgtt_enable(dev); |
1148 | else | 1209 | else |
1149 | WARN_ON(1); | 1210 | MISSING_CASE(INTEL_INFO(dev)->gen); |
1150 | 1211 | ||
1151 | if (ppgtt) { | 1212 | if (ppgtt) { |
1152 | for_each_ring(ring, dev_priv, i) { | 1213 | for_each_ring(ring, dev_priv, i) { |
@@ -1341,9 +1402,12 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) | |||
1341 | /* The bind_vma code tries to be smart about tracking mappings. | 1402 | /* The bind_vma code tries to be smart about tracking mappings. |
1342 | * Unfortunately above, we've just wiped out the mappings | 1403 | * Unfortunately above, we've just wiped out the mappings |
1343 | * without telling our object about it. So we need to fake it. | 1404 | * without telling our object about it. So we need to fake it. |
1405 | * | ||
1406 | * Bind is not expected to fail since this is only called on | ||
1407 | * resume and assumption is all requirements exist already. | ||
1344 | */ | 1408 | */ |
1345 | vma->bound &= ~GLOBAL_BIND; | 1409 | vma->bound &= ~GLOBAL_BIND; |
1346 | vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND); | 1410 | WARN_ON(i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND)); |
1347 | } | 1411 | } |
1348 | 1412 | ||
1349 | 1413 | ||
@@ -1538,7 +1602,7 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma, | |||
1538 | AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; | 1602 | AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; |
1539 | 1603 | ||
1540 | BUG_ON(!i915_is_ggtt(vma->vm)); | 1604 | BUG_ON(!i915_is_ggtt(vma->vm)); |
1541 | intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags); | 1605 | intel_gtt_insert_sg_entries(vma->ggtt_view.pages, entry, flags); |
1542 | vma->bound = GLOBAL_BIND; | 1606 | vma->bound = GLOBAL_BIND; |
1543 | } | 1607 | } |
1544 | 1608 | ||
@@ -1588,7 +1652,7 @@ static void ggtt_bind_vma(struct i915_vma *vma, | |||
1588 | if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) { | 1652 | if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) { |
1589 | if (!(vma->bound & GLOBAL_BIND) || | 1653 | if (!(vma->bound & GLOBAL_BIND) || |
1590 | (cache_level != obj->cache_level)) { | 1654 | (cache_level != obj->cache_level)) { |
1591 | vma->vm->insert_entries(vma->vm, obj->pages, | 1655 | vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages, |
1592 | vma->node.start, | 1656 | vma->node.start, |
1593 | cache_level, flags); | 1657 | cache_level, flags); |
1594 | vma->bound |= GLOBAL_BIND; | 1658 | vma->bound |= GLOBAL_BIND; |
@@ -1600,7 +1664,7 @@ static void ggtt_bind_vma(struct i915_vma *vma, | |||
1600 | (cache_level != obj->cache_level))) { | 1664 | (cache_level != obj->cache_level))) { |
1601 | struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; | 1665 | struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; |
1602 | appgtt->base.insert_entries(&appgtt->base, | 1666 | appgtt->base.insert_entries(&appgtt->base, |
1603 | vma->obj->pages, | 1667 | vma->ggtt_view.pages, |
1604 | vma->node.start, | 1668 | vma->node.start, |
1605 | cache_level, flags); | 1669 | cache_level, flags); |
1606 | vma->bound |= LOCAL_BIND; | 1670 | vma->bound |= LOCAL_BIND; |
@@ -2165,7 +2229,8 @@ int i915_gem_gtt_init(struct drm_device *dev) | |||
2165 | } | 2229 | } |
2166 | 2230 | ||
2167 | static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, | 2231 | static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, |
2168 | struct i915_address_space *vm) | 2232 | struct i915_address_space *vm, |
2233 | const struct i915_ggtt_view *view) | ||
2169 | { | 2234 | { |
2170 | struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); | 2235 | struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); |
2171 | if (vma == NULL) | 2236 | if (vma == NULL) |
@@ -2176,12 +2241,9 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, | |||
2176 | INIT_LIST_HEAD(&vma->exec_list); | 2241 | INIT_LIST_HEAD(&vma->exec_list); |
2177 | vma->vm = vm; | 2242 | vma->vm = vm; |
2178 | vma->obj = obj; | 2243 | vma->obj = obj; |
2244 | vma->ggtt_view = *view; | ||
2179 | 2245 | ||
2180 | switch (INTEL_INFO(vm->dev)->gen) { | 2246 | if (INTEL_INFO(vm->dev)->gen >= 6) { |
2181 | case 9: | ||
2182 | case 8: | ||
2183 | case 7: | ||
2184 | case 6: | ||
2185 | if (i915_is_ggtt(vm)) { | 2247 | if (i915_is_ggtt(vm)) { |
2186 | vma->unbind_vma = ggtt_unbind_vma; | 2248 | vma->unbind_vma = ggtt_unbind_vma; |
2187 | vma->bind_vma = ggtt_bind_vma; | 2249 | vma->bind_vma = ggtt_bind_vma; |
@@ -2189,39 +2251,73 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, | |||
2189 | vma->unbind_vma = ppgtt_unbind_vma; | 2251 | vma->unbind_vma = ppgtt_unbind_vma; |
2190 | vma->bind_vma = ppgtt_bind_vma; | 2252 | vma->bind_vma = ppgtt_bind_vma; |
2191 | } | 2253 | } |
2192 | break; | 2254 | } else { |
2193 | case 5: | ||
2194 | case 4: | ||
2195 | case 3: | ||
2196 | case 2: | ||
2197 | BUG_ON(!i915_is_ggtt(vm)); | 2255 | BUG_ON(!i915_is_ggtt(vm)); |
2198 | vma->unbind_vma = i915_ggtt_unbind_vma; | 2256 | vma->unbind_vma = i915_ggtt_unbind_vma; |
2199 | vma->bind_vma = i915_ggtt_bind_vma; | 2257 | vma->bind_vma = i915_ggtt_bind_vma; |
2200 | break; | ||
2201 | default: | ||
2202 | BUG(); | ||
2203 | } | 2258 | } |
2204 | 2259 | ||
2205 | /* Keep GGTT vmas first to make debug easier */ | 2260 | list_add_tail(&vma->vma_link, &obj->vma_list); |
2206 | if (i915_is_ggtt(vm)) | 2261 | if (!i915_is_ggtt(vm)) |
2207 | list_add(&vma->vma_link, &obj->vma_list); | ||
2208 | else { | ||
2209 | list_add_tail(&vma->vma_link, &obj->vma_list); | ||
2210 | i915_ppgtt_get(i915_vm_to_ppgtt(vm)); | 2262 | i915_ppgtt_get(i915_vm_to_ppgtt(vm)); |
2211 | } | ||
2212 | 2263 | ||
2213 | return vma; | 2264 | return vma; |
2214 | } | 2265 | } |
2215 | 2266 | ||
2216 | struct i915_vma * | 2267 | struct i915_vma * |
2217 | i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, | 2268 | i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj, |
2218 | struct i915_address_space *vm) | 2269 | struct i915_address_space *vm, |
2270 | const struct i915_ggtt_view *view) | ||
2219 | { | 2271 | { |
2220 | struct i915_vma *vma; | 2272 | struct i915_vma *vma; |
2221 | 2273 | ||
2222 | vma = i915_gem_obj_to_vma(obj, vm); | 2274 | vma = i915_gem_obj_to_vma_view(obj, vm, view); |
2223 | if (!vma) | 2275 | if (!vma) |
2224 | vma = __i915_gem_vma_create(obj, vm); | 2276 | vma = __i915_gem_vma_create(obj, vm, view); |
2225 | 2277 | ||
2226 | return vma; | 2278 | return vma; |
2227 | } | 2279 | } |
2280 | |||
2281 | static inline | ||
2282 | int i915_get_vma_pages(struct i915_vma *vma) | ||
2283 | { | ||
2284 | if (vma->ggtt_view.pages) | ||
2285 | return 0; | ||
2286 | |||
2287 | if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) | ||
2288 | vma->ggtt_view.pages = vma->obj->pages; | ||
2289 | else | ||
2290 | WARN_ONCE(1, "GGTT view %u not implemented!\n", | ||
2291 | vma->ggtt_view.type); | ||
2292 | |||
2293 | if (!vma->ggtt_view.pages) { | ||
2294 | DRM_ERROR("Failed to get pages for VMA view type %u!\n", | ||
2295 | vma->ggtt_view.type); | ||
2296 | return -EINVAL; | ||
2297 | } | ||
2298 | |||
2299 | return 0; | ||
2300 | } | ||
2301 | |||
2302 | /** | ||
2303 | * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space. | ||
2304 | * @vma: VMA to map | ||
2305 | * @cache_level: mapping cache level | ||
2306 | * @flags: flags like global or local mapping | ||
2307 | * | ||
2308 | * DMA addresses are taken from the scatter-gather table of this object (or of | ||
2309 | * this VMA in the case of non-default GGTT views) and PTE entries are set up. | ||
2310 | * Note that DMA addresses are also the only part of the SG table we care about. | ||
2311 | */ | ||
2312 | int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, | ||
2313 | u32 flags) | ||
2314 | { | ||
2315 | int ret = i915_get_vma_pages(vma); | ||
2316 | |||
2317 | if (ret) | ||
2318 | return ret; | ||
2319 | |||
2320 | vma->bind_vma(vma, cache_level, flags); | ||
2321 | |||
2322 | return 0; | ||
2323 | } | ||
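Every former raw vma->bind_vma() caller converted in this patch now goes through i915_vma_bind() and must handle failure; the canonical caller shape (taken from the pin path earlier in this diff) is:

	if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND)) {
		ret = i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
		if (ret)
			return ret;
	}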
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index beaf4bcfdac8..e377c7d27bd4 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h | |||
@@ -109,7 +109,20 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; | |||
109 | #define GEN8_PPAT_ELLC_OVERRIDE (0<<2) | 109 | #define GEN8_PPAT_ELLC_OVERRIDE (0<<2) |
110 | #define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8)) | 110 | #define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8)) |
111 | 111 | ||
112 | enum i915_ggtt_view_type { | ||
113 | I915_GGTT_VIEW_NORMAL = 0, | ||
114 | }; | ||
115 | |||
116 | struct i915_ggtt_view { | ||
117 | enum i915_ggtt_view_type type; | ||
118 | |||
119 | struct sg_table *pages; | ||
120 | }; | ||
121 | |||
122 | extern const struct i915_ggtt_view i915_ggtt_view_normal; | ||
123 | |||
112 | enum i915_cache_level; | 124 | enum i915_cache_level; |
125 | |||
113 | /** | 126 | /** |
114 | * A VMA represents a GEM BO that is bound into an address space. Therefore, a | 127 | * A VMA represents a GEM BO that is bound into an address space. Therefore, a |
115 | * VMA's presence cannot be guaranteed before binding, or after unbinding the | 128 | * VMA's presence cannot be guaranteed before binding, or after unbinding the |
@@ -129,6 +142,15 @@ struct i915_vma { | |||
129 | #define PTE_READ_ONLY (1<<2) | 142 | #define PTE_READ_ONLY (1<<2) |
130 | unsigned int bound : 4; | 143 | unsigned int bound : 4; |
131 | 144 | ||
145 | /** | ||
146 | * Support different GGTT views into the same object. | ||
147 | * This means there can be multiple VMA mappings per object and per VM. | ||
148 | * i915_ggtt_view_type is used to distinguish between those entries. | ||
149 | * The zero value (I915_GGTT_VIEW_NORMAL) is the default and is also | ||
150 | * assumed in GEM functions which take no ggtt view parameter. | ||
151 | */ | ||
152 | struct i915_ggtt_view ggtt_view; | ||
153 | |||
132 | /** This object's place on the active/inactive lists */ | 154 | /** This object's place on the active/inactive lists */ |
133 | struct list_head mm_list; | 155 | struct list_head mm_list; |
134 | 156 | ||
@@ -146,11 +168,10 @@ struct i915_vma { | |||
146 | 168 | ||
147 | /** | 169 | /** |
148 | * How many users have pinned this object in GTT space. The following | 170 | * How many users have pinned this object in GTT space. The following |
149 | * users can each hold at most one reference: pwrite/pread, pin_ioctl | 171 | * users can each hold at most one reference: pwrite/pread, execbuffer |
150 | * (via user_pin_count), execbuffer (objects are not allowed multiple | 172 | * (objects are not allowed multiple times for the same batchbuffer), |
151 | * times for the same batchbuffer), and the framebuffer code. When | 173 | * and the framebuffer code. When switching/pageflipping, the |
152 | * switching/pageflipping, the framebuffer code has at most two buffers | 174 | * framebuffer code has at most two buffers pinned per crtc. |
153 | * pinned per crtc. | ||
154 | * | 175 | * |
155 | * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 | 176 | * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 |
156 | * bits with absolutely no headroom. So use 4 bits. */ | 177 | * bits with absolutely no headroom. So use 4 bits. */ |
@@ -182,7 +203,7 @@ struct i915_address_space { | |||
182 | * List of objects currently involved in rendering. | 203 | * List of objects currently involved in rendering. |
183 | * | 204 | * |
184 | * Includes buffers having the contents of their GPU caches | 205 | * Includes buffers having the contents of their GPU caches |
185 | * flushed, not necessarily primitives. last_rendering_seqno | 206 | * flushed, not necessarily primitives. last_read_req |
186 | * represents when the rendering involved will be completed. | 207 | * represents when the rendering involved will be completed. |
187 | * | 208 | * |
188 | * A reference is held on the buffer while on this list. | 209 | * A reference is held on the buffer while on this list. |
@@ -193,7 +214,7 @@ struct i915_address_space { | |||
193 | * LRU list of objects which are not in the ringbuffer and | 214 | * LRU list of objects which are not in the ringbuffer and |
194 | * are ready to unbind, but are still in the GTT. | 215 | * are ready to unbind, but are still in the GTT. |
195 | * | 216 | * |
196 | * last_rendering_seqno is 0 while an object is in this list. | 217 | * last_read_req is NULL while an object is in this list. |
197 | * | 218 | * |
198 | * A reference is not held on the buffer while on this list, | 219 | * A reference is not held on the buffer while on this list, |
199 | * as merely being GTT-bound shouldn't prevent its being | 220 | * as merely being GTT-bound shouldn't prevent its being |
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 98dcd94acba8..521548a08578 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c | |||
@@ -173,7 +173,7 @@ int i915_gem_render_state_init(struct intel_engine_cs *ring) | |||
173 | 173 | ||
174 | i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring); | 174 | i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring); |
175 | 175 | ||
176 | ret = __i915_add_request(ring, NULL, so.obj, NULL); | 176 | ret = __i915_add_request(ring, NULL, so.obj); |
177 | /* __i915_add_request moves object to inactive if it fails */ | 177 | /* __i915_add_request moves object to inactive if it fails */ |
178 | out: | 178 | out: |
179 | i915_gem_render_state_fini(&so); | 179 | i915_gem_render_state_fini(&so); |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 4727a4e2c87c..7a24bd1a51f6 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -399,7 +399,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
399 | } | 399 | } |
400 | 400 | ||
401 | obj->fence_dirty = | 401 | obj->fence_dirty = |
402 | obj->last_fenced_seqno || | 402 | obj->last_fenced_req || |
403 | obj->fence_reg != I915_FENCE_REG_NONE; | 403 | obj->fence_reg != I915_FENCE_REG_NONE; |
404 | 404 | ||
405 | obj->tiling_mode = args->tiling_mode; | 405 | obj->tiling_mode = args->tiling_mode; |
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index cdaee6ce05f8..be5c9908659b 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c | |||
@@ -670,8 +670,8 @@ static void capture_bo(struct drm_i915_error_buffer *err, | |||
670 | 670 | ||
671 | err->size = obj->base.size; | 671 | err->size = obj->base.size; |
672 | err->name = obj->base.name; | 672 | err->name = obj->base.name; |
673 | err->rseqno = obj->last_read_seqno; | 673 | err->rseqno = i915_gem_request_get_seqno(obj->last_read_req); |
674 | err->wseqno = obj->last_write_seqno; | 674 | err->wseqno = i915_gem_request_get_seqno(obj->last_write_req); |
675 | err->gtt_offset = vma->node.start; | 675 | err->gtt_offset = vma->node.start; |
676 | err->read_domains = obj->base.read_domains; | 676 | err->read_domains = obj->base.read_domains; |
677 | err->write_domain = obj->base.write_domain; | 677 | err->write_domain = obj->base.write_domain; |
@@ -679,13 +679,12 @@ static void capture_bo(struct drm_i915_error_buffer *err, | |||
679 | err->pinned = 0; | 679 | err->pinned = 0; |
680 | if (i915_gem_obj_is_pinned(obj)) | 680 | if (i915_gem_obj_is_pinned(obj)) |
681 | err->pinned = 1; | 681 | err->pinned = 1; |
682 | if (obj->user_pin_count > 0) | ||
683 | err->pinned = -1; | ||
684 | err->tiling = obj->tiling_mode; | 682 | err->tiling = obj->tiling_mode; |
685 | err->dirty = obj->dirty; | 683 | err->dirty = obj->dirty; |
686 | err->purgeable = obj->madv != I915_MADV_WILLNEED; | 684 | err->purgeable = obj->madv != I915_MADV_WILLNEED; |
687 | err->userptr = obj->userptr.mm != NULL; | 685 | err->userptr = obj->userptr.mm != NULL; |
688 | err->ring = obj->ring ? obj->ring->id : -1; | 686 | err->ring = obj->last_read_req ? |
687 | i915_gem_request_get_ring(obj->last_read_req)->id : -1; | ||
689 | err->cache_level = obj->cache_level; | 688 | err->cache_level = obj->cache_level; |
690 | } | 689 | } |
691 | 690 | ||
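capture_bo() now derives the ring from the last read request, guarded by a NULL check since idle objects carry no request. A sketch of i915_gem_request_get_ring() under the same assumptions as above (the NULL guard is itself an assumption, and may be redundant given the caller's check):

static inline struct intel_engine_cs *
i915_gem_request_get_ring(struct drm_i915_gem_request *req)
{
	return req ? req->ring : NULL;
}

Note also that the user_pin_count test (err->pinned = -1) is dropped, so error captures no longer distinguish userspace pins.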
@@ -719,10 +718,8 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, | |||
719 | break; | 718 | break; |
720 | 719 | ||
721 | list_for_each_entry(vma, &obj->vma_list, vma_link) | 720 | list_for_each_entry(vma, &obj->vma_list, vma_link) |
722 | if (vma->vm == vm && vma->pin_count > 0) { | 721 | if (vma->vm == vm && vma->pin_count > 0) |
723 | capture_bo(err++, vma); | 722 | capture_bo(err++, vma); |
724 | break; | ||
725 | } | ||
726 | } | 723 | } |
727 | 724 | ||
728 | return err - first; | 725 | return err - first; |
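With the break gone, capture_pinned_bo() records every pinned VMA an object has in the given address space instead of only the first match; the identical break removal in i915_gem_capture_vm() further down keeps the pinned_bo_count it computes consistent with the number of entries actually filled in here.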
@@ -767,32 +764,21 @@ static void i915_gem_record_fences(struct drm_device *dev, | |||
767 | struct drm_i915_private *dev_priv = dev->dev_private; | 764 | struct drm_i915_private *dev_priv = dev->dev_private; |
768 | int i; | 765 | int i; |
769 | 766 | ||
770 | /* Fences */ | 767 | if (IS_GEN3(dev) || IS_GEN2(dev)) { |
771 | switch (INTEL_INFO(dev)->gen) { | ||
772 | case 9: | ||
773 | case 8: | ||
774 | case 7: | ||
775 | case 6: | ||
776 | for (i = 0; i < dev_priv->num_fence_regs; i++) | ||
777 | error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); | ||
778 | break; | ||
779 | case 5: | ||
780 | case 4: | ||
781 | for (i = 0; i < 16; i++) | ||
782 | error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | ||
783 | break; | ||
784 | case 3: | ||
785 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | ||
786 | for (i = 0; i < 8; i++) | ||
787 | error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | ||
788 | case 2: | ||
789 | for (i = 0; i < 8; i++) | 768 | for (i = 0; i < 8; i++) |
790 | error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | 769 | error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); |
791 | break; | 770 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
792 | 771 | for (i = 0; i < 8; i++) | |
793 | default: | 772 | error->fence[i+8] = I915_READ(FENCE_REG_945_8 + |
794 | BUG(); | 773 | (i * 4)); |
795 | } | 774 | } else if (IS_GEN5(dev) || IS_GEN4(dev)) |
775 | for (i = 0; i < 16; i++) | ||
776 | error->fence[i] = I915_READ64(FENCE_REG_965_0 + | ||
777 | (i * 8)); | ||
778 | else if (INTEL_INFO(dev)->gen >= 6) | ||
779 | for (i = 0; i < dev_priv->num_fence_regs; i++) | ||
780 | error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + | ||
781 | (i * 8)); | ||
796 | } | 782 | } |
797 | 783 | ||
798 | 784 | ||
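The fence capture switch is rewritten as an if/else ladder (the same conversion is applied to the pp_dir_base and instdone code in the following hunks). The old implicit fallthrough from case 3 into case 2, where 945/G33 parts read eight extra fence registers on top of the common eight, becomes an explicit nested conditional, and the BUG() default disappears: an unrecognized generation now simply records no fences.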
@@ -926,9 +912,13 @@ static void i915_record_ring_state(struct drm_device *dev, | |||
926 | 912 | ||
927 | ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring)); | 913 | ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring)); |
928 | 914 | ||
929 | switch (INTEL_INFO(dev)->gen) { | 915 | if (IS_GEN6(dev)) |
930 | case 9: | 916 | ering->vm_info.pp_dir_base = |
931 | case 8: | 917 | I915_READ(RING_PP_DIR_BASE_READ(ring)); |
918 | else if (IS_GEN7(dev)) | ||
919 | ering->vm_info.pp_dir_base = | ||
920 | I915_READ(RING_PP_DIR_BASE(ring)); | ||
921 | else if (INTEL_INFO(dev)->gen >= 8) | ||
932 | for (i = 0; i < 4; i++) { | 922 | for (i = 0; i < 4; i++) { |
933 | ering->vm_info.pdp[i] = | 923 | ering->vm_info.pdp[i] = |
934 | I915_READ(GEN8_RING_PDP_UDW(ring, i)); | 924 | I915_READ(GEN8_RING_PDP_UDW(ring, i)); |
@@ -936,16 +926,6 @@ static void i915_record_ring_state(struct drm_device *dev, | |||
936 | ering->vm_info.pdp[i] |= | 926 | ering->vm_info.pdp[i] |= |
937 | I915_READ(GEN8_RING_PDP_LDW(ring, i)); | 927 | I915_READ(GEN8_RING_PDP_LDW(ring, i)); |
938 | } | 928 | } |
939 | break; | ||
940 | case 7: | ||
941 | ering->vm_info.pp_dir_base = | ||
942 | I915_READ(RING_PP_DIR_BASE(ring)); | ||
943 | break; | ||
944 | case 6: | ||
945 | ering->vm_info.pp_dir_base = | ||
946 | I915_READ(RING_PP_DIR_BASE_READ(ring)); | ||
947 | break; | ||
948 | } | ||
949 | } | 929 | } |
950 | } | 930 | } |
951 | 931 | ||
@@ -1097,10 +1077,8 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv, | |||
1097 | 1077 | ||
1098 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { | 1078 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { |
1099 | list_for_each_entry(vma, &obj->vma_list, vma_link) | 1079 | list_for_each_entry(vma, &obj->vma_list, vma_link) |
1100 | if (vma->vm == vm && vma->pin_count > 0) { | 1080 | if (vma->vm == vm && vma->pin_count > 0) |
1101 | i++; | 1081 | i++; |
1102 | break; | ||
1103 | } | ||
1104 | } | 1082 | } |
1105 | error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx]; | 1083 | error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx]; |
1106 | 1084 | ||
@@ -1378,26 +1356,15 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone) | |||
1378 | struct drm_i915_private *dev_priv = dev->dev_private; | 1356 | struct drm_i915_private *dev_priv = dev->dev_private; |
1379 | memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); | 1357 | memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); |
1380 | 1358 | ||
1381 | switch (INTEL_INFO(dev)->gen) { | 1359 | if (IS_GEN2(dev) || IS_GEN3(dev)) |
1382 | case 2: | ||
1383 | case 3: | ||
1384 | instdone[0] = I915_READ(INSTDONE); | 1360 | instdone[0] = I915_READ(INSTDONE); |
1385 | break; | 1361 | else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { |
1386 | case 4: | ||
1387 | case 5: | ||
1388 | case 6: | ||
1389 | instdone[0] = I915_READ(INSTDONE_I965); | 1362 | instdone[0] = I915_READ(INSTDONE_I965); |
1390 | instdone[1] = I915_READ(INSTDONE1); | 1363 | instdone[1] = I915_READ(INSTDONE1); |
1391 | break; | 1364 | } else if (INTEL_INFO(dev)->gen >= 7) { |
1392 | default: | ||
1393 | WARN_ONCE(1, "Unsupported platform\n"); | ||
1394 | case 7: | ||
1395 | case 8: | ||
1396 | case 9: | ||
1397 | instdone[0] = I915_READ(GEN7_INSTDONE_1); | 1365 | instdone[0] = I915_READ(GEN7_INSTDONE_1); |
1398 | instdone[1] = I915_READ(GEN7_SC_INSTDONE); | 1366 | instdone[1] = I915_READ(GEN7_SC_INSTDONE); |
1399 | instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); | 1367 | instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); |
1400 | instdone[3] = I915_READ(GEN7_ROW_INSTDONE); | 1368 | instdone[3] = I915_READ(GEN7_ROW_INSTDONE); |
1401 | break; | ||
1402 | } | 1369 | } |
1403 | } | 1370 | } |
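Same conversion again, with one behavioural nuance: the WARN_ONCE "Unsupported platform" default is gone, so a platform matching none of the branches now silently returns the array zeroed by the memset at the top of the function rather than warning.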
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 996c2931c499..5d837735a3c2 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -183,6 +183,8 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, | |||
183 | { | 183 | { |
184 | assert_spin_locked(&dev_priv->irq_lock); | 184 | assert_spin_locked(&dev_priv->irq_lock); |
185 | 185 | ||
186 | WARN_ON(enabled_irq_mask & ~interrupt_mask); | ||
187 | |||
186 | if (WARN_ON(!intel_irqs_enabled(dev_priv))) | 188 | if (WARN_ON(!intel_irqs_enabled(dev_priv))) |
187 | return; | 189 | return; |
188 | 190 | ||
@@ -229,6 +231,8 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv, | |||
229 | { | 231 | { |
230 | uint32_t new_val; | 232 | uint32_t new_val; |
231 | 233 | ||
234 | WARN_ON(enabled_irq_mask & ~interrupt_mask); | ||
235 | |||
232 | assert_spin_locked(&dev_priv->irq_lock); | 236 | assert_spin_locked(&dev_priv->irq_lock); |
233 | 237 | ||
234 | new_val = dev_priv->pm_irq_mask; | 238 | new_val = dev_priv->pm_irq_mask; |
@@ -332,6 +336,8 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, | |||
332 | sdeimr &= ~interrupt_mask; | 336 | sdeimr &= ~interrupt_mask; |
333 | sdeimr |= (~enabled_irq_mask & interrupt_mask); | 337 | sdeimr |= (~enabled_irq_mask & interrupt_mask); |
334 | 338 | ||
339 | WARN_ON(enabled_irq_mask & ~interrupt_mask); | ||
340 | |||
335 | assert_spin_locked(&dev_priv->irq_lock); | 341 | assert_spin_locked(&dev_priv->irq_lock); |
336 | 342 | ||
337 | if (WARN_ON(!intel_irqs_enabled(dev_priv))) | 343 | if (WARN_ON(!intel_irqs_enabled(dev_priv))) |
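The assertion added to all three mask-update helpers checks that the caller only asks to enable bits it also included in the set being updated; an enabled bit outside interrupt_mask would otherwise be silently dropped by the mask arithmetic. A sketch of the misuse it catches (the specific interrupt bits are illustrative):

	u32 interrupt_mask   = GT_RENDER_USER_INTERRUPT;
	u32 enabled_irq_mask = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);	/* fires: the BSD bit is not in the update set */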
@@ -1017,7 +1023,7 @@ static void notify_ring(struct drm_device *dev, | |||
1017 | if (!intel_ring_initialized(ring)) | 1023 | if (!intel_ring_initialized(ring)) |
1018 | return; | 1024 | return; |
1019 | 1025 | ||
1020 | trace_i915_gem_request_complete(ring); | 1026 | trace_i915_gem_request_notify(ring); |
1021 | 1027 | ||
1022 | wake_up_all(&ring->irq_queue); | 1028 | wake_up_all(&ring->irq_queue); |
1023 | } | 1029 | } |
@@ -1383,14 +1389,14 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, | |||
1383 | if (rcs & GT_RENDER_USER_INTERRUPT) | 1389 | if (rcs & GT_RENDER_USER_INTERRUPT) |
1384 | notify_ring(dev, ring); | 1390 | notify_ring(dev, ring); |
1385 | if (rcs & GT_CONTEXT_SWITCH_INTERRUPT) | 1391 | if (rcs & GT_CONTEXT_SWITCH_INTERRUPT) |
1386 | intel_execlists_handle_ctx_events(ring); | 1392 | intel_lrc_irq_handler(ring); |
1387 | 1393 | ||
1388 | bcs = tmp >> GEN8_BCS_IRQ_SHIFT; | 1394 | bcs = tmp >> GEN8_BCS_IRQ_SHIFT; |
1389 | ring = &dev_priv->ring[BCS]; | 1395 | ring = &dev_priv->ring[BCS]; |
1390 | if (bcs & GT_RENDER_USER_INTERRUPT) | 1396 | if (bcs & GT_RENDER_USER_INTERRUPT) |
1391 | notify_ring(dev, ring); | 1397 | notify_ring(dev, ring); |
1392 | if (bcs & GT_CONTEXT_SWITCH_INTERRUPT) | 1398 | if (bcs & GT_CONTEXT_SWITCH_INTERRUPT) |
1393 | intel_execlists_handle_ctx_events(ring); | 1399 | intel_lrc_irq_handler(ring); |
1394 | } else | 1400 | } else |
1395 | DRM_ERROR("The master control interrupt lied (GT0)!\n"); | 1401 | DRM_ERROR("The master control interrupt lied (GT0)!\n"); |
1396 | } | 1402 | } |
@@ -1406,14 +1412,14 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, | |||
1406 | if (vcs & GT_RENDER_USER_INTERRUPT) | 1412 | if (vcs & GT_RENDER_USER_INTERRUPT) |
1407 | notify_ring(dev, ring); | 1413 | notify_ring(dev, ring); |
1408 | if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) | 1414 | if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) |
1409 | intel_execlists_handle_ctx_events(ring); | 1415 | intel_lrc_irq_handler(ring); |
1410 | 1416 | ||
1411 | vcs = tmp >> GEN8_VCS2_IRQ_SHIFT; | 1417 | vcs = tmp >> GEN8_VCS2_IRQ_SHIFT; |
1412 | ring = &dev_priv->ring[VCS2]; | 1418 | ring = &dev_priv->ring[VCS2]; |
1413 | if (vcs & GT_RENDER_USER_INTERRUPT) | 1419 | if (vcs & GT_RENDER_USER_INTERRUPT) |
1414 | notify_ring(dev, ring); | 1420 | notify_ring(dev, ring); |
1415 | if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) | 1421 | if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) |
1416 | intel_execlists_handle_ctx_events(ring); | 1422 | intel_lrc_irq_handler(ring); |
1417 | } else | 1423 | } else |
1418 | DRM_ERROR("The master control interrupt lied (GT1)!\n"); | 1424 | DRM_ERROR("The master control interrupt lied (GT1)!\n"); |
1419 | } | 1425 | } |
@@ -1440,7 +1446,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, | |||
1440 | if (vcs & GT_RENDER_USER_INTERRUPT) | 1446 | if (vcs & GT_RENDER_USER_INTERRUPT) |
1441 | notify_ring(dev, ring); | 1447 | notify_ring(dev, ring); |
1442 | if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) | 1448 | if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) |
1443 | intel_execlists_handle_ctx_events(ring); | 1449 | intel_lrc_irq_handler(ring); |
1444 | } else | 1450 | } else |
1445 | DRM_ERROR("The master control interrupt lied (GT3)!\n"); | 1451 | DRM_ERROR("The master control interrupt lied (GT3)!\n"); |
1446 | } | 1452 | } |
@@ -2753,18 +2759,18 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe) | |||
2753 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | 2759 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2754 | } | 2760 | } |
2755 | 2761 | ||
2756 | static u32 | 2762 | static struct drm_i915_gem_request * |
2757 | ring_last_seqno(struct intel_engine_cs *ring) | 2763 | ring_last_request(struct intel_engine_cs *ring) |
2758 | { | 2764 | { |
2759 | return list_entry(ring->request_list.prev, | 2765 | return list_entry(ring->request_list.prev, |
2760 | struct drm_i915_gem_request, list)->seqno; | 2766 | struct drm_i915_gem_request, list); |
2761 | } | 2767 | } |
2762 | 2768 | ||
2763 | static bool | 2769 | static bool |
2764 | ring_idle(struct intel_engine_cs *ring, u32 seqno) | 2770 | ring_idle(struct intel_engine_cs *ring) |
2765 | { | 2771 | { |
2766 | return (list_empty(&ring->request_list) || | 2772 | return (list_empty(&ring->request_list) || |
2767 | i915_seqno_passed(seqno, ring_last_seqno(ring))); | 2773 | i915_gem_request_completed(ring_last_request(ring), false)); |
2768 | } | 2774 | } |
2769 | 2775 | ||
2770 | static bool | 2776 | static bool |
@@ -2984,7 +2990,7 @@ static void i915_hangcheck_elapsed(unsigned long data) | |||
2984 | acthd = intel_ring_get_active_head(ring); | 2990 | acthd = intel_ring_get_active_head(ring); |
2985 | 2991 | ||
2986 | if (ring->hangcheck.seqno == seqno) { | 2992 | if (ring->hangcheck.seqno == seqno) { |
2987 | if (ring_idle(ring, seqno)) { | 2993 | if (ring_idle(ring)) { |
2988 | ring->hangcheck.action = HANGCHECK_IDLE; | 2994 | ring->hangcheck.action = HANGCHECK_IDLE; |
2989 | 2995 | ||
2990 | if (waitqueue_active(&ring->irq_queue)) { | 2996 | if (waitqueue_active(&ring->irq_queue)) { |
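ring_idle() now asks whether the last queued request has completed instead of comparing raw seqnos itself. A sketch of i915_gem_request_completed(), assuming it still reduces to a seqno comparison with the lazy_coherency flag forwarded to the ring's get_seqno hook (only the name and call signature appear in this diff; the body is an assumption):

static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
					      bool lazy_coherency)
{
	u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);

	return i915_seqno_passed(seqno, req->seqno);
}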
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index c91cb2033cc5..07252d8dc726 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -35,7 +35,7 @@ struct i915_params i915 __read_mostly = { | |||
35 | .vbt_sdvo_panel_type = -1, | 35 | .vbt_sdvo_panel_type = -1, |
36 | .enable_rc6 = -1, | 36 | .enable_rc6 = -1, |
37 | .enable_fbc = -1, | 37 | .enable_fbc = -1, |
38 | .enable_execlists = 0, | 38 | .enable_execlists = -1, |
39 | .enable_hangcheck = true, | 39 | .enable_hangcheck = true, |
40 | .enable_ppgtt = -1, | 40 | .enable_ppgtt = -1, |
41 | .enable_psr = 0, | 41 | .enable_psr = 0, |
@@ -51,6 +51,7 @@ struct i915_params i915 __read_mostly = { | |||
51 | .disable_vtd_wa = 0, | 51 | .disable_vtd_wa = 0, |
52 | .use_mmio_flip = 0, | 52 | .use_mmio_flip = 0, |
53 | .mmio_debug = 0, | 53 | .mmio_debug = 0, |
54 | .verbose_state_checks = 1, | ||
54 | }; | 55 | }; |
55 | 56 | ||
56 | module_param_named(modeset, i915.modeset, int, 0400); | 57 | module_param_named(modeset, i915.modeset, int, 0400); |
@@ -122,7 +123,7 @@ MODULE_PARM_DESC(enable_ppgtt, | |||
122 | module_param_named(enable_execlists, i915.enable_execlists, int, 0400); | 123 | module_param_named(enable_execlists, i915.enable_execlists, int, 0400); |
123 | MODULE_PARM_DESC(enable_execlists, | 124 | MODULE_PARM_DESC(enable_execlists, |
124 | "Override execlists usage. " | 125 | "Override execlists usage. " |
125 | "(-1=auto, 0=disabled [default], 1=enabled)"); | 126 | "(-1=auto [default], 0=disabled, 1=enabled)"); |
126 | 127 | ||
127 | module_param_named(enable_psr, i915.enable_psr, int, 0600); | 128 | module_param_named(enable_psr, i915.enable_psr, int, 0600); |
128 | MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)"); | 129 | MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)"); |
@@ -173,3 +174,7 @@ module_param_named(mmio_debug, i915.mmio_debug, bool, 0600); | |||
173 | MODULE_PARM_DESC(mmio_debug, | 174 | MODULE_PARM_DESC(mmio_debug, |
174 | "Enable the MMIO debug code (default: false). This may negatively " | 175 | "Enable the MMIO debug code (default: false). This may negatively " |
175 | "affect performance."); | 176 | "affect performance."); |
177 | |||
178 | module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600); | ||
179 | MODULE_PARM_DESC(verbose_state_checks, | ||
180 | "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions."); | ||
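With these defaults, execlist submission is chosen per platform instead of being unconditionally off; booting with i915.enable_execlists=0 restores the old behaviour, and i915.verbose_state_checks=0 disables the newly added WARN_ON-based state checks.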
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 172de3b3433b..40ca873a05ad 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -31,6 +31,8 @@ | |||
31 | #define _PORT(port, a, b) ((a) + (port)*((b)-(a))) | 31 | #define _PORT(port, a, b) ((a) + (port)*((b)-(a))) |
32 | #define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \ | 32 | #define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \ |
33 | (pipe) == PIPE_B ? (b) : (c)) | 33 | (pipe) == PIPE_B ? (b) : (c)) |
34 | #define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \ | ||
35 | (port) == PORT_B ? (b) : (c)) | ||
34 | 36 | ||
35 | #define _MASKED_FIELD(mask, value) ({ \ | 37 | #define _MASKED_FIELD(mask, value) ({ \ |
36 | if (__builtin_constant_p(mask)) \ | 38 | if (__builtin_constant_p(mask)) \ |
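_PORT3 extends the two-way _PORT trick to three instances by explicit selection rather than address arithmetic, since three register bases are rarely evenly spaced. For example (addresses made up):

	_PORT3(PORT_A, 0x100, 0x200, 0x300)	/* 0x100 */
	_PORT3(PORT_C, 0x100, 0x200, 0x300)	/* 0x300: any port other than A or B takes the third slot */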
@@ -217,6 +219,8 @@ | |||
217 | #define INSTR_SUBCLIENT_SHIFT 27 | 219 | #define INSTR_SUBCLIENT_SHIFT 27 |
218 | #define INSTR_SUBCLIENT_MASK 0x18000000 | 220 | #define INSTR_SUBCLIENT_MASK 0x18000000 |
219 | #define INSTR_MEDIA_SUBCLIENT 0x2 | 221 | #define INSTR_MEDIA_SUBCLIENT 0x2 |
222 | #define INSTR_26_TO_24_MASK 0x7000000 | ||
223 | #define INSTR_26_TO_24_SHIFT 24 | ||
220 | 224 | ||
221 | /* | 225 | /* |
222 | * Memory interface instructions used by the kernel | 226 | * Memory interface instructions used by the kernel |
@@ -246,6 +250,7 @@ | |||
246 | #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) | 250 | #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) |
247 | #define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) | 251 | #define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0) |
248 | #define MI_SUSPEND_FLUSH_EN (1<<0) | 252 | #define MI_SUSPEND_FLUSH_EN (1<<0) |
253 | #define MI_SET_APPID MI_INSTR(0x0e, 0) | ||
249 | #define MI_OVERLAY_FLIP MI_INSTR(0x11, 0) | 254 | #define MI_OVERLAY_FLIP MI_INSTR(0x11, 0) |
250 | #define MI_OVERLAY_CONTINUE (0x0<<21) | 255 | #define MI_OVERLAY_CONTINUE (0x0<<21) |
251 | #define MI_OVERLAY_ON (0x1<<21) | 256 | #define MI_OVERLAY_ON (0x1<<21) |
@@ -303,8 +308,9 @@ | |||
303 | #define MI_SEMAPHORE_POLL (1<<15) | 308 | #define MI_SEMAPHORE_POLL (1<<15) |
304 | #define MI_SEMAPHORE_SAD_GTE_SDD (1<<12) | 309 | #define MI_SEMAPHORE_SAD_GTE_SDD (1<<12) |
305 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) | 310 | #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) |
306 | #define MI_STORE_DWORD_IMM_GEN8 MI_INSTR(0x20, 2) | 311 | #define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2) |
307 | #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ | 312 | #define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */ |
313 | #define MI_USE_GGTT (1 << 22) /* g4x+ */ | ||
308 | #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) | 314 | #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) |
309 | #define MI_STORE_DWORD_INDEX_SHIFT 2 | 315 | #define MI_STORE_DWORD_INDEX_SHIFT 2 |
310 | /* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM: | 316 | /* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM: |
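The rename says this MI_INSTR(0x20, 2) encoding is usable from gen4 onwards rather than being gen8-specific, and bit 22 is split by platform era per the updated comments: MI_MEM_VIRTUAL on 945/G33/965, MI_USE_GGTT on g4x+. A caller would combine them roughly as follows (sketch, g4x+ path assumed):

	u32 cmd = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;	/* pre-g4x parts would use MI_MEM_VIRTUAL here */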
@@ -470,17 +476,18 @@ | |||
470 | */ | 476 | */ |
471 | #define BCS_SWCTRL 0x22200 | 477 | #define BCS_SWCTRL 0x22200 |
472 | 478 | ||
473 | #define HS_INVOCATION_COUNT 0x2300 | 479 | #define GPGPU_THREADS_DISPATCHED 0x2290 |
474 | #define DS_INVOCATION_COUNT 0x2308 | 480 | #define HS_INVOCATION_COUNT 0x2300 |
475 | #define IA_VERTICES_COUNT 0x2310 | 481 | #define DS_INVOCATION_COUNT 0x2308 |
476 | #define IA_PRIMITIVES_COUNT 0x2318 | 482 | #define IA_VERTICES_COUNT 0x2310 |
477 | #define VS_INVOCATION_COUNT 0x2320 | 483 | #define IA_PRIMITIVES_COUNT 0x2318 |
478 | #define GS_INVOCATION_COUNT 0x2328 | 484 | #define VS_INVOCATION_COUNT 0x2320 |
479 | #define GS_PRIMITIVES_COUNT 0x2330 | 485 | #define GS_INVOCATION_COUNT 0x2328 |
480 | #define CL_INVOCATION_COUNT 0x2338 | 486 | #define GS_PRIMITIVES_COUNT 0x2330 |
481 | #define CL_PRIMITIVES_COUNT 0x2340 | 487 | #define CL_INVOCATION_COUNT 0x2338 |
482 | #define PS_INVOCATION_COUNT 0x2348 | 488 | #define CL_PRIMITIVES_COUNT 0x2340 |
483 | #define PS_DEPTH_COUNT 0x2350 | 489 | #define PS_INVOCATION_COUNT 0x2348 |
490 | #define PS_DEPTH_COUNT 0x2350 | ||
484 | 491 | ||
485 | /* There are the 4 64-bit counter registers, one for each stream output */ | 492 | /* There are the 4 64-bit counter registers, one for each stream output */ |
486 | #define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8) | 493 | #define GEN7_SO_NUM_PRIMS_WRITTEN(n) (0x5200 + (n) * 8) |
@@ -1509,7 +1516,7 @@ enum punit_power_well { | |||
1509 | #define I915_ISP_INTERRUPT (1<<22) | 1516 | #define I915_ISP_INTERRUPT (1<<22) |
1510 | #define I915_LPE_PIPE_B_INTERRUPT (1<<21) | 1517 | #define I915_LPE_PIPE_B_INTERRUPT (1<<21) |
1511 | #define I915_LPE_PIPE_A_INTERRUPT (1<<20) | 1518 | #define I915_LPE_PIPE_A_INTERRUPT (1<<20) |
1512 | #define I915_MIPIB_INTERRUPT (1<<19) | 1519 | #define I915_MIPIC_INTERRUPT (1<<19) |
1513 | #define I915_MIPIA_INTERRUPT (1<<18) | 1520 | #define I915_MIPIA_INTERRUPT (1<<18) |
1514 | #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) | 1521 | #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) |
1515 | #define I915_DISPLAY_PORT_INTERRUPT (1<<17) | 1522 | #define I915_DISPLAY_PORT_INTERRUPT (1<<17) |
@@ -2539,6 +2546,42 @@ enum punit_power_well { | |||
2539 | #define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC) | 2546 | #define PIPESRC(trans) _TRANSCODER2(trans, _PIPEASRC) |
2540 | #define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A) | 2547 | #define PIPE_MULT(trans) _TRANSCODER2(trans, _PIPE_MULT_A) |
2541 | 2548 | ||
2549 | /* VLV eDP PSR registers */ | ||
2550 | #define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090) | ||
2551 | #define _PSRCTLB (VLV_DISPLAY_BASE + 0x61090) | ||
2552 | #define VLV_EDP_PSR_ENABLE (1<<0) | ||
2553 | #define VLV_EDP_PSR_RESET (1<<1) | ||
2554 | #define VLV_EDP_PSR_MODE_MASK (7<<2) | ||
2555 | #define VLV_EDP_PSR_MODE_HW_TIMER (1<<3) | ||
2556 | #define VLV_EDP_PSR_MODE_SW_TIMER (1<<2) | ||
2557 | #define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1<<7) | ||
2558 | #define VLV_EDP_PSR_ACTIVE_ENTRY (1<<8) | ||
2559 | #define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1<<9) | ||
2560 | #define VLV_EDP_PSR_DBL_FRAME (1<<10) | ||
2561 | #define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff<<16) | ||
2562 | #define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16 | ||
2563 | #define VLV_PSRCTL(pipe) _PIPE(pipe, _PSRCTLA, _PSRCTLB) | ||
2564 | |||
2565 | #define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0) | ||
2566 | #define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0) | ||
2567 | #define VLV_EDP_PSR_SDP_FREQ_MASK (3<<30) | ||
2568 | #define VLV_EDP_PSR_SDP_FREQ_ONCE (1<<31) | ||
2569 | #define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1<<30) | ||
2570 | #define VLV_VSCSDP(pipe) _PIPE(pipe, _VSCSDPA, _VSCSDPB) | ||
2571 | |||
2572 | #define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094) | ||
2573 | #define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094) | ||
2574 | #define VLV_EDP_PSR_LAST_STATE_MASK (7<<3) | ||
2575 | #define VLV_EDP_PSR_CURR_STATE_MASK 7 | ||
2576 | #define VLV_EDP_PSR_DISABLED (0<<0) | ||
2577 | #define VLV_EDP_PSR_INACTIVE (1<<0) | ||
2578 | #define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2<<0) | ||
2579 | #define VLV_EDP_PSR_ACTIVE_NORFB_UP (3<<0) | ||
2580 | #define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4<<0) | ||
2581 | #define VLV_EDP_PSR_EXIT (5<<0) | ||
2582 | #define VLV_EDP_PSR_IN_TRANS (1<<7) | ||
2583 | #define VLV_PSRSTAT(pipe) _PIPE(pipe, _PSRSTATA, _PSRSTATB) | ||
2584 | |||
2542 | /* HSW+ eDP PSR registers */ | 2585 | /* HSW+ eDP PSR registers */ |
2543 | #define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800) | 2586 | #define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800) |
2544 | #define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0) | 2587 | #define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0) |
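Unlike the HSW+ block that follows (one instance at a device-dependent base), the new VLV PSR registers are instantiated per pipe. A read sketch, assuming dev_priv and pipe are in scope:

	u32 stat = I915_READ(VLV_PSRSTAT(pipe)) & VLV_EDP_PSR_CURR_STATE_MASK;

	if (stat == VLV_EDP_PSR_ACTIVE_NORFB_UP)
		; /* panel is self-refreshing with no fb updates */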
@@ -2762,7 +2805,8 @@ enum punit_power_well { | |||
2762 | #define DC_BALANCE_RESET (1 << 25) | 2805 | #define DC_BALANCE_RESET (1 << 25) |
2763 | #define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154) | 2806 | #define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154) |
2764 | #define DC_BALANCE_RESET_VLV (1 << 31) | 2807 | #define DC_BALANCE_RESET_VLV (1 << 31) |
2765 | #define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0) | 2808 | #define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0)) |
2809 | #define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */ | ||
2766 | #define PIPE_B_SCRAMBLE_RESET (1 << 1) | 2810 | #define PIPE_B_SCRAMBLE_RESET (1 << 1) |
2767 | #define PIPE_A_SCRAMBLE_RESET (1 << 0) | 2811 | #define PIPE_A_SCRAMBLE_RESET (1 << 0) |
2768 | 2812 | ||
@@ -6006,6 +6050,10 @@ enum punit_power_well { | |||
6006 | #define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31) | 6050 | #define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31) |
6007 | #define VLV_PWRDWNUPCTL 0xA294 | 6051 | #define VLV_PWRDWNUPCTL 0xA294 |
6008 | 6052 | ||
6053 | #define VLV_CHICKEN_3 (VLV_DISPLAY_BASE + 0x7040C) | ||
6054 | #define PIXEL_OVERLAP_CNT_MASK (3 << 30) | ||
6055 | #define PIXEL_OVERLAP_CNT_SHIFT 30 | ||
6056 | |||
6009 | #define GEN6_PMISR 0x44020 | 6057 | #define GEN6_PMISR 0x44020 |
6010 | #define GEN6_PMIMR 0x44024 /* rps_lock */ | 6058 | #define GEN6_PMIMR 0x44024 /* rps_lock */ |
6011 | #define GEN6_PMIIR 0x44028 | 6059 | #define GEN6_PMIIR 0x44028 |
@@ -6631,29 +6679,31 @@ enum punit_power_well { | |||
6631 | #define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME) | 6679 | #define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME) |
6632 | #define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO) | 6680 | #define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO) |
6633 | 6681 | ||
6634 | /* VLV MIPI registers */ | 6682 | /* MIPI DSI registers */ |
6683 | |||
6684 | #define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */ | ||
6635 | 6685 | ||
6636 | #define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190) | 6686 | #define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190) |
6637 | #define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700) | 6687 | #define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700) |
6638 | #define MIPI_PORT_CTRL(tc) _TRANSCODER(tc, _MIPIA_PORT_CTRL, \ | 6688 | #define MIPI_PORT_CTRL(port) _MIPI_PORT(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL) |
6639 | _MIPIB_PORT_CTRL) | 6689 | #define DPI_ENABLE (1 << 31) /* A + C */ |
6640 | #define DPI_ENABLE (1 << 31) /* A + B */ | ||
6641 | #define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27 | 6690 | #define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27 |
6642 | #define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27) | 6691 | #define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27) |
6692 | #define DUAL_LINK_MODE_SHIFT 26 | ||
6643 | #define DUAL_LINK_MODE_MASK (1 << 26) | 6693 | #define DUAL_LINK_MODE_MASK (1 << 26) |
6644 | #define DUAL_LINK_MODE_FRONT_BACK (0 << 26) | 6694 | #define DUAL_LINK_MODE_FRONT_BACK (0 << 26) |
6645 | #define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26) | 6695 | #define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26) |
6646 | #define DITHERING_ENABLE (1 << 25) /* A + B */ | 6696 | #define DITHERING_ENABLE (1 << 25) /* A + C */ |
6647 | #define FLOPPED_HSTX (1 << 23) | 6697 | #define FLOPPED_HSTX (1 << 23) |
6648 | #define DE_INVERT (1 << 19) /* XXX */ | 6698 | #define DE_INVERT (1 << 19) /* XXX */ |
6649 | #define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18 | 6699 | #define MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18 |
6650 | #define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18) | 6700 | #define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18) |
6651 | #define AFE_LATCHOUT (1 << 17) | 6701 | #define AFE_LATCHOUT (1 << 17) |
6652 | #define LP_OUTPUT_HOLD (1 << 16) | 6702 | #define LP_OUTPUT_HOLD (1 << 16) |
6653 | #define MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15 | 6703 | #define MIPIC_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15 |
6654 | #define MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15) | 6704 | #define MIPIC_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15) |
6655 | #define MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT 11 | 6705 | #define MIPIC_MIPI4DPHY_DELAY_COUNT_SHIFT 11 |
6656 | #define MIPIB_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11) | 6706 | #define MIPIC_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11) |
6657 | #define CSB_SHIFT 9 | 6707 | #define CSB_SHIFT 9 |
6658 | #define CSB_MASK (3 << 9) | 6708 | #define CSB_MASK (3 << 9) |
6659 | #define CSB_20MHZ (0 << 9) | 6709 | #define CSB_20MHZ (0 << 9) |
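Since VLV routes DSI only to ports A and C, _MIPI_PORT fills the unused PORT_B slot of _PORT3 with 0; indexing these macros with PORT_B would resolve to register address 0, so callers must not pass it. Expanding the definition above:

	MIPI_PORT_CTRL(PORT_A)	/* VLV_DISPLAY_BASE + 0x61190 */
	MIPI_PORT_CTRL(PORT_C)	/* VLV_DISPLAY_BASE + 0x61700 */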
@@ -6662,10 +6712,10 @@ enum punit_power_well { | |||
6662 | #define BANDGAP_MASK (1 << 8) | 6712 | #define BANDGAP_MASK (1 << 8) |
6663 | #define BANDGAP_PNW_CIRCUIT (0 << 8) | 6713 | #define BANDGAP_PNW_CIRCUIT (0 << 8) |
6664 | #define BANDGAP_LNC_CIRCUIT (1 << 8) | 6714 | #define BANDGAP_LNC_CIRCUIT (1 << 8) |
6665 | #define MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT 5 | 6715 | #define MIPIC_FLISDSI_DELAY_COUNT_LOW_SHIFT 5 |
6666 | #define MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5) | 6716 | #define MIPIC_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5) |
6667 | #define TEARING_EFFECT_DELAY (1 << 4) /* A + B */ | 6717 | #define TEARING_EFFECT_DELAY (1 << 4) /* A + C */ |
6668 | #define TEARING_EFFECT_SHIFT 2 /* A + B */ | 6718 | #define TEARING_EFFECT_SHIFT 2 /* A + C */ |
6669 | #define TEARING_EFFECT_MASK (3 << 2) | 6719 | #define TEARING_EFFECT_MASK (3 << 2) |
6670 | #define TEARING_EFFECT_OFF (0 << 2) | 6720 | #define TEARING_EFFECT_OFF (0 << 2) |
6671 | #define TEARING_EFFECT_DSI (1 << 2) | 6721 | #define TEARING_EFFECT_DSI (1 << 2) |
@@ -6677,9 +6727,9 @@ enum punit_power_well { | |||
6677 | #define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0) | 6727 | #define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0) |
6678 | 6728 | ||
6679 | #define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194) | 6729 | #define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194) |
6680 | #define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704) | 6730 | #define _MIPIC_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704) |
6681 | #define MIPI_TEARING_CTRL(tc) _TRANSCODER(tc, \ | 6731 | #define MIPI_TEARING_CTRL(port) _MIPI_PORT(port, \ |
6682 | _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL) | 6732 | _MIPIA_TEARING_CTRL, _MIPIC_TEARING_CTRL) |
6683 | #define TEARING_EFFECT_DELAY_SHIFT 0 | 6733 | #define TEARING_EFFECT_DELAY_SHIFT 0 |
6684 | #define TEARING_EFFECT_DELAY_MASK (0xffff << 0) | 6734 | #define TEARING_EFFECT_DELAY_MASK (0xffff << 0) |
6685 | 6735 | ||
@@ -6689,9 +6739,9 @@ enum punit_power_well { | |||
6689 | /* MIPI DSI Controller and D-PHY registers */ | 6739 | /* MIPI DSI Controller and D-PHY registers */ |
6690 | 6740 | ||
6691 | #define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000) | 6741 | #define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000) |
6692 | #define _MIPIB_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800) | 6742 | #define _MIPIC_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800) |
6693 | #define MIPI_DEVICE_READY(tc) _TRANSCODER(tc, _MIPIA_DEVICE_READY, \ | 6743 | #define MIPI_DEVICE_READY(port) _MIPI_PORT(port, _MIPIA_DEVICE_READY, \ |
6694 | _MIPIB_DEVICE_READY) | 6744 | _MIPIC_DEVICE_READY) |
6695 | #define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */ | 6745 | #define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */ |
6696 | #define ULPS_STATE_MASK (3 << 1) | 6746 | #define ULPS_STATE_MASK (3 << 1) |
6697 | #define ULPS_STATE_ENTER (2 << 1) | 6747 | #define ULPS_STATE_ENTER (2 << 1) |
@@ -6700,13 +6750,13 @@ enum punit_power_well { | |||
6700 | #define DEVICE_READY (1 << 0) | 6750 | #define DEVICE_READY (1 << 0) |
6701 | 6751 | ||
6702 | #define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004) | 6752 | #define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004) |
6703 | #define _MIPIB_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804) | 6753 | #define _MIPIC_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804) |
6704 | #define MIPI_INTR_STAT(tc) _TRANSCODER(tc, _MIPIA_INTR_STAT, \ | 6754 | #define MIPI_INTR_STAT(port) _MIPI_PORT(port, _MIPIA_INTR_STAT, \ |
6705 | _MIPIB_INTR_STAT) | 6755 | _MIPIC_INTR_STAT) |
6706 | #define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008) | 6756 | #define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008) |
6707 | #define _MIPIB_INTR_EN (dev_priv->mipi_mmio_base + 0xb808) | 6757 | #define _MIPIC_INTR_EN (dev_priv->mipi_mmio_base + 0xb808) |
6708 | #define MIPI_INTR_EN(tc) _TRANSCODER(tc, _MIPIA_INTR_EN, \ | 6758 | #define MIPI_INTR_EN(port) _MIPI_PORT(port, _MIPIA_INTR_EN, \ |
6709 | _MIPIB_INTR_EN) | 6759 | _MIPIC_INTR_EN) |
6710 | #define TEARING_EFFECT (1 << 31) | 6760 | #define TEARING_EFFECT (1 << 31) |
6711 | #define SPL_PKT_SENT_INTERRUPT (1 << 30) | 6761 | #define SPL_PKT_SENT_INTERRUPT (1 << 30) |
6712 | #define GEN_READ_DATA_AVAIL (1 << 29) | 6762 | #define GEN_READ_DATA_AVAIL (1 << 29) |
@@ -6741,9 +6791,9 @@ enum punit_power_well { | |||
6741 | #define RXSOT_ERROR (1 << 0) | 6791 | #define RXSOT_ERROR (1 << 0) |
6742 | 6792 | ||
6743 | #define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c) | 6793 | #define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c) |
6744 | #define _MIPIB_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c) | 6794 | #define _MIPIC_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c) |
6745 | #define MIPI_DSI_FUNC_PRG(tc) _TRANSCODER(tc, _MIPIA_DSI_FUNC_PRG, \ | 6795 | #define MIPI_DSI_FUNC_PRG(port) _MIPI_PORT(port, _MIPIA_DSI_FUNC_PRG, \ |
6746 | _MIPIB_DSI_FUNC_PRG) | 6796 | _MIPIC_DSI_FUNC_PRG) |
6747 | #define CMD_MODE_DATA_WIDTH_MASK (7 << 13) | 6797 | #define CMD_MODE_DATA_WIDTH_MASK (7 << 13) |
6748 | #define CMD_MODE_NOT_SUPPORTED (0 << 13) | 6798 | #define CMD_MODE_NOT_SUPPORTED (0 << 13) |
6749 | #define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13) | 6799 | #define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13) |
@@ -6765,93 +6815,93 @@ enum punit_power_well { | |||
6765 | #define DATA_LANES_PRG_REG_MASK (7 << 0) | 6815 | #define DATA_LANES_PRG_REG_MASK (7 << 0) |
6766 | 6816 | ||
6767 | #define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010) | 6817 | #define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010) |
6768 | #define _MIPIB_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810) | 6818 | #define _MIPIC_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810) |
6769 | #define MIPI_HS_TX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_HS_TX_TIMEOUT, \ | 6819 | #define MIPI_HS_TX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_HS_TX_TIMEOUT, \ |
6770 | _MIPIB_HS_TX_TIMEOUT) | 6820 | _MIPIC_HS_TX_TIMEOUT) |
6771 | #define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff | 6821 | #define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff |
6772 | 6822 | ||
6773 | #define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014) | 6823 | #define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014) |
6774 | #define _MIPIB_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814) | 6824 | #define _MIPIC_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814) |
6775 | #define MIPI_LP_RX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_LP_RX_TIMEOUT, \ | 6825 | #define MIPI_LP_RX_TIMEOUT(port) _MIPI_PORT(port, _MIPIA_LP_RX_TIMEOUT, \ |
6776 | _MIPIB_LP_RX_TIMEOUT) | 6826 | _MIPIC_LP_RX_TIMEOUT) |
6777 | #define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff | 6827 | #define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff |
6778 | 6828 | ||
6779 | #define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018) | 6829 | #define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018) |
6780 | #define _MIPIB_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818) | 6830 | #define _MIPIC_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818) |
6781 | #define MIPI_TURN_AROUND_TIMEOUT(tc) _TRANSCODER(tc, \ | 6831 | #define MIPI_TURN_AROUND_TIMEOUT(port) _MIPI_PORT(port, \ |
6782 | _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT) | 6832 | _MIPIA_TURN_AROUND_TIMEOUT, _MIPIC_TURN_AROUND_TIMEOUT) |
6783 | #define TURN_AROUND_TIMEOUT_MASK 0x3f | 6833 | #define TURN_AROUND_TIMEOUT_MASK 0x3f |
6784 | 6834 | ||
6785 | #define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c) | 6835 | #define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c) |
6786 | #define _MIPIB_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c) | 6836 | #define _MIPIC_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c) |
6787 | #define MIPI_DEVICE_RESET_TIMER(tc) _TRANSCODER(tc, \ | 6837 | #define MIPI_DEVICE_RESET_TIMER(port) _MIPI_PORT(port, \ |
6788 | _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER) | 6838 | _MIPIA_DEVICE_RESET_TIMER, _MIPIC_DEVICE_RESET_TIMER) |
6789 | #define DEVICE_RESET_TIMER_MASK 0xffff | 6839 | #define DEVICE_RESET_TIMER_MASK 0xffff |
6790 | 6840 | ||
6791 | #define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020) | 6841 | #define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020) |
6792 | #define _MIPIB_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820) | 6842 | #define _MIPIC_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820) |
6793 | #define MIPI_DPI_RESOLUTION(tc) _TRANSCODER(tc, _MIPIA_DPI_RESOLUTION, \ | 6843 | #define MIPI_DPI_RESOLUTION(port) _MIPI_PORT(port, _MIPIA_DPI_RESOLUTION, \ |
6794 | _MIPIB_DPI_RESOLUTION) | 6844 | _MIPIC_DPI_RESOLUTION) |
6795 | #define VERTICAL_ADDRESS_SHIFT 16 | 6845 | #define VERTICAL_ADDRESS_SHIFT 16 |
6796 | #define VERTICAL_ADDRESS_MASK (0xffff << 16) | 6846 | #define VERTICAL_ADDRESS_MASK (0xffff << 16) |
6797 | #define HORIZONTAL_ADDRESS_SHIFT 0 | 6847 | #define HORIZONTAL_ADDRESS_SHIFT 0 |
6798 | #define HORIZONTAL_ADDRESS_MASK 0xffff | 6848 | #define HORIZONTAL_ADDRESS_MASK 0xffff |
6799 | 6849 | ||
6800 | #define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024) | 6850 | #define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024) |
6801 | #define _MIPIB_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824) | 6851 | #define _MIPIC_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824) |
6802 | #define MIPI_DBI_FIFO_THROTTLE(tc) _TRANSCODER(tc, \ | 6852 | #define MIPI_DBI_FIFO_THROTTLE(port) _MIPI_PORT(port, \ |
6803 | _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE) | 6853 | _MIPIA_DBI_FIFO_THROTTLE, _MIPIC_DBI_FIFO_THROTTLE) |
6804 | #define DBI_FIFO_EMPTY_HALF (0 << 0) | 6854 | #define DBI_FIFO_EMPTY_HALF (0 << 0) |
6805 | #define DBI_FIFO_EMPTY_QUARTER (1 << 0) | 6855 | #define DBI_FIFO_EMPTY_QUARTER (1 << 0) |
6806 | #define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0) | 6856 | #define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0) |
6807 | 6857 | ||
6808 | /* regs below are bits 15:0 */ | 6858 | /* regs below are bits 15:0 */ |
6809 | #define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028) | 6859 | #define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028) |
6810 | #define _MIPIB_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828) | 6860 | #define _MIPIC_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828) |
6811 | #define MIPI_HSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \ | 6861 | #define MIPI_HSYNC_PADDING_COUNT(port) _MIPI_PORT(port, \ |
6812 | _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT) | 6862 | _MIPIA_HSYNC_PADDING_COUNT, _MIPIC_HSYNC_PADDING_COUNT) |
6813 | 6863 | ||
6814 | #define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c) | 6864 | #define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c) |
6815 | #define _MIPIB_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c) | 6865 | #define _MIPIC_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c) |
6816 | #define MIPI_HBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HBP_COUNT, \ | 6866 | #define MIPI_HBP_COUNT(port) _MIPI_PORT(port, _MIPIA_HBP_COUNT, \ |
6817 | _MIPIB_HBP_COUNT) | 6867 | _MIPIC_HBP_COUNT) |
6818 | 6868 | ||
6819 | #define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030) | 6869 | #define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030) |
6820 | #define _MIPIB_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830) | 6870 | #define _MIPIC_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830) |
6821 | #define MIPI_HFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HFP_COUNT, \ | 6871 | #define MIPI_HFP_COUNT(port) _MIPI_PORT(port, _MIPIA_HFP_COUNT, \ |
6822 | _MIPIB_HFP_COUNT) | 6872 | _MIPIC_HFP_COUNT) |
6823 | 6873 | ||
6824 | #define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034) | 6874 | #define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034) |
6825 | #define _MIPIB_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834) | 6875 | #define _MIPIC_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834) |
6826 | #define MIPI_HACTIVE_AREA_COUNT(tc) _TRANSCODER(tc, \ | 6876 | #define MIPI_HACTIVE_AREA_COUNT(port) _MIPI_PORT(port, \ |
6827 | _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT) | 6877 | _MIPIA_HACTIVE_AREA_COUNT, _MIPIC_HACTIVE_AREA_COUNT) |
6828 | 6878 | ||
6829 | #define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038) | 6879 | #define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038) |
6830 | #define _MIPIB_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838) | 6880 | #define _MIPIC_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838) |
6831 | #define MIPI_VSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \ | 6881 | #define MIPI_VSYNC_PADDING_COUNT(port) _MIPI_PORT(port, \ |
6832 | _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT) | 6882 | _MIPIA_VSYNC_PADDING_COUNT, _MIPIC_VSYNC_PADDING_COUNT) |
6833 | 6883 | ||
6834 | #define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c) | 6884 | #define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c) |
6835 | #define _MIPIB_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c) | 6885 | #define _MIPIC_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c) |
6836 | #define MIPI_VBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VBP_COUNT, \ | 6886 | #define MIPI_VBP_COUNT(port) _MIPI_PORT(port, _MIPIA_VBP_COUNT, \ |
6837 | _MIPIB_VBP_COUNT) | 6887 | _MIPIC_VBP_COUNT) |
6838 | 6888 | ||
6839 | #define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040) | 6889 | #define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040) |
6840 | #define _MIPIB_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840) | 6890 | #define _MIPIC_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840) |
6841 | #define MIPI_VFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VFP_COUNT, \ | 6891 | #define MIPI_VFP_COUNT(port) _MIPI_PORT(port, _MIPIA_VFP_COUNT, \ |
6842 | _MIPIB_VFP_COUNT) | 6892 | _MIPIC_VFP_COUNT) |
6843 | 6893 | ||
6844 | #define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044) | 6894 | #define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044) |
6845 | #define _MIPIB_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844) | 6895 | #define _MIPIC_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844) |
6846 | #define MIPI_HIGH_LOW_SWITCH_COUNT(tc) _TRANSCODER(tc, \ | 6896 | #define MIPI_HIGH_LOW_SWITCH_COUNT(port) _MIPI_PORT(port, \ |
6847 | _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT) | 6897 | _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIC_HIGH_LOW_SWITCH_COUNT) |
6848 | 6898 | ||
6849 | /* regs above are bits 15:0 */ | 6899 | /* regs above are bits 15:0 */ |
6850 | 6900 | ||
6851 | #define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048) | 6901 | #define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048) |
6852 | #define _MIPIB_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848) | 6902 | #define _MIPIC_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848) |
6853 | #define MIPI_DPI_CONTROL(tc) _TRANSCODER(tc, _MIPIA_DPI_CONTROL, \ | 6903 | #define MIPI_DPI_CONTROL(port) _MIPI_PORT(port, _MIPIA_DPI_CONTROL, \ |
6854 | _MIPIB_DPI_CONTROL) | 6904 | _MIPIC_DPI_CONTROL) |
6855 | #define DPI_LP_MODE (1 << 6) | 6905 | #define DPI_LP_MODE (1 << 6) |
6856 | #define BACKLIGHT_OFF (1 << 5) | 6906 | #define BACKLIGHT_OFF (1 << 5) |
6857 | #define BACKLIGHT_ON (1 << 4) | 6907 | #define BACKLIGHT_ON (1 << 4) |
@@ -6861,30 +6911,30 @@ enum punit_power_well { | |||
6861 | #define SHUTDOWN (1 << 0) | 6911 | #define SHUTDOWN (1 << 0) |
6862 | 6912 | ||
6863 | #define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c) | 6913 | #define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c) |
6864 | #define _MIPIB_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c) | 6914 | #define _MIPIC_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c) |
6865 | #define MIPI_DPI_DATA(tc) _TRANSCODER(tc, _MIPIA_DPI_DATA, \ | 6915 | #define MIPI_DPI_DATA(port) _MIPI_PORT(port, _MIPIA_DPI_DATA, \ |
6866 | _MIPIB_DPI_DATA) | 6916 | _MIPIC_DPI_DATA) |
6867 | #define COMMAND_BYTE_SHIFT 0 | 6917 | #define COMMAND_BYTE_SHIFT 0 |
6868 | #define COMMAND_BYTE_MASK (0x3f << 0) | 6918 | #define COMMAND_BYTE_MASK (0x3f << 0) |
6869 | 6919 | ||
6870 | #define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050) | 6920 | #define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050) |
6871 | #define _MIPIB_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850) | 6921 | #define _MIPIC_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850) |
6872 | #define MIPI_INIT_COUNT(tc) _TRANSCODER(tc, _MIPIA_INIT_COUNT, \ | 6922 | #define MIPI_INIT_COUNT(port) _MIPI_PORT(port, _MIPIA_INIT_COUNT, \ |
6873 | _MIPIB_INIT_COUNT) | 6923 | _MIPIC_INIT_COUNT) |
6874 | #define MASTER_INIT_TIMER_SHIFT 0 | 6924 | #define MASTER_INIT_TIMER_SHIFT 0 |
6875 | #define MASTER_INIT_TIMER_MASK (0xffff << 0) | 6925 | #define MASTER_INIT_TIMER_MASK (0xffff << 0) |
6876 | 6926 | ||
6877 | #define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054) | 6927 | #define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054) |
6878 | #define _MIPIB_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854) | 6928 | #define _MIPIC_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854) |
6879 | #define MIPI_MAX_RETURN_PKT_SIZE(tc) _TRANSCODER(tc, \ | 6929 | #define MIPI_MAX_RETURN_PKT_SIZE(port) _MIPI_PORT(port, \ |
6880 | _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE) | 6930 | _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIC_MAX_RETURN_PKT_SIZE) |
6881 | #define MAX_RETURN_PKT_SIZE_SHIFT 0 | 6931 | #define MAX_RETURN_PKT_SIZE_SHIFT 0 |
6882 | #define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0) | 6932 | #define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0) |
6883 | 6933 | ||
6884 | #define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058) | 6934 | #define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058) |
6885 | #define _MIPIB_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858) | 6935 | #define _MIPIC_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858) |
6886 | #define MIPI_VIDEO_MODE_FORMAT(tc) _TRANSCODER(tc, \ | 6936 | #define MIPI_VIDEO_MODE_FORMAT(port) _MIPI_PORT(port, \ |
6887 | _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT) | 6937 | _MIPIA_VIDEO_MODE_FORMAT, _MIPIC_VIDEO_MODE_FORMAT) |
6888 | #define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4) | 6938 | #define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4) |
6889 | #define DISABLE_VIDEO_BTA (1 << 3) | 6939 | #define DISABLE_VIDEO_BTA (1 << 3) |
6890 | #define IP_TG_CONFIG (1 << 2) | 6940 | #define IP_TG_CONFIG (1 << 2) |
@@ -6893,9 +6943,9 @@ enum punit_power_well { | |||
6893 | #define VIDEO_MODE_BURST (3 << 0) | 6943 | #define VIDEO_MODE_BURST (3 << 0) |
6894 | 6944 | ||
6895 | #define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c) | 6945 | #define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c) |
6896 | #define _MIPIB_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c) | 6946 | #define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c) |
6897 | #define MIPI_EOT_DISABLE(tc) _TRANSCODER(tc, _MIPIA_EOT_DISABLE, \ | 6947 | #define MIPI_EOT_DISABLE(port) _MIPI_PORT(port, _MIPIA_EOT_DISABLE, \ |
6898 | _MIPIB_EOT_DISABLE) | 6948 | _MIPIC_EOT_DISABLE) |
6899 | #define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) | 6949 | #define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) |
6900 | #define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) | 6950 | #define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) |
6901 | #define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) | 6951 | #define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) |
@@ -6906,32 +6956,32 @@ enum punit_power_well { | |||
6906 | #define EOT_DISABLE (1 << 0) | 6956 | #define EOT_DISABLE (1 << 0) |
6907 | 6957 | ||
6908 | #define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060) | 6958 | #define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060) |
6909 | #define _MIPIB_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860) | 6959 | #define _MIPIC_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860) |
6910 | #define MIPI_LP_BYTECLK(tc) _TRANSCODER(tc, _MIPIA_LP_BYTECLK, \ | 6960 | #define MIPI_LP_BYTECLK(port) _MIPI_PORT(port, _MIPIA_LP_BYTECLK, \ |
6911 | _MIPIB_LP_BYTECLK) | 6961 | _MIPIC_LP_BYTECLK) |
6912 | #define LP_BYTECLK_SHIFT 0 | 6962 | #define LP_BYTECLK_SHIFT 0 |
6913 | #define LP_BYTECLK_MASK (0xffff << 0) | 6963 | #define LP_BYTECLK_MASK (0xffff << 0) |
6914 | 6964 | ||
6915 | /* bits 31:0 */ | 6965 | /* bits 31:0 */ |
6916 | #define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064) | 6966 | #define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064) |
6917 | #define _MIPIB_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864) | 6967 | #define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864) |
6918 | #define MIPI_LP_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_DATA, \ | 6968 | #define MIPI_LP_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_LP_GEN_DATA, \ |
6919 | _MIPIB_LP_GEN_DATA) | 6969 | _MIPIC_LP_GEN_DATA) |
6920 | 6970 | ||
6921 | /* bits 31:0 */ | 6971 | /* bits 31:0 */ |
6922 | #define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068) | 6972 | #define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068) |
6923 | #define _MIPIB_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868) | 6973 | #define _MIPIC_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868) |
6924 | #define MIPI_HS_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_DATA, \ | 6974 | #define MIPI_HS_GEN_DATA(port) _MIPI_PORT(port, _MIPIA_HS_GEN_DATA, \ |
6925 | _MIPIB_HS_GEN_DATA) | 6975 | _MIPIC_HS_GEN_DATA) |
6926 | 6976 | ||
6927 | #define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c) | 6977 | #define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c) |
6928 | #define _MIPIB_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c) | 6978 | #define _MIPIC_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c) |
6929 | #define MIPI_LP_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_CTRL, \ | 6979 | #define MIPI_LP_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_LP_GEN_CTRL, \ |
6930 | _MIPIB_LP_GEN_CTRL) | 6980 | _MIPIC_LP_GEN_CTRL) |
6931 | #define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070) | 6981 | #define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070) |
6932 | #define _MIPIB_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870) | 6982 | #define _MIPIC_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870) |
6933 | #define MIPI_HS_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_CTRL, \ | 6983 | #define MIPI_HS_GEN_CTRL(port) _MIPI_PORT(port, _MIPIA_HS_GEN_CTRL, \ |
6934 | _MIPIB_HS_GEN_CTRL) | 6984 | _MIPIC_HS_GEN_CTRL) |
6935 | #define LONG_PACKET_WORD_COUNT_SHIFT 8 | 6985 | #define LONG_PACKET_WORD_COUNT_SHIFT 8 |
6936 | #define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8) | 6986 | #define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8) |
6937 | #define SHORT_PACKET_PARAM_SHIFT 8 | 6987 | #define SHORT_PACKET_PARAM_SHIFT 8 |
@@ -6943,9 +6993,9 @@ enum punit_power_well { | |||
6943 | /* data type values, see include/video/mipi_display.h */ | 6993 | /* data type values, see include/video/mipi_display.h */ |
6944 | 6994 | ||
6945 | #define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074) | 6995 | #define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074) |
6946 | #define _MIPIB_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874) | 6996 | #define _MIPIC_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874) |
6947 | #define MIPI_GEN_FIFO_STAT(tc) _TRANSCODER(tc, _MIPIA_GEN_FIFO_STAT, \ | 6997 | #define MIPI_GEN_FIFO_STAT(port) _MIPI_PORT(port, _MIPIA_GEN_FIFO_STAT, \ |
6948 | _MIPIB_GEN_FIFO_STAT) | 6998 | _MIPIC_GEN_FIFO_STAT) |
6949 | #define DPI_FIFO_EMPTY (1 << 28) | 6999 | #define DPI_FIFO_EMPTY (1 << 28) |
6950 | #define DBI_FIFO_EMPTY (1 << 27) | 7000 | #define DBI_FIFO_EMPTY (1 << 27) |
6951 | #define LP_CTRL_FIFO_EMPTY (1 << 26) | 7001 | #define LP_CTRL_FIFO_EMPTY (1 << 26) |
@@ -6962,17 +7012,17 @@ enum punit_power_well { | |||
6962 | #define HS_DATA_FIFO_FULL (1 << 0) | 7012 | #define HS_DATA_FIFO_FULL (1 << 0) |
6963 | 7013 | ||
6964 | #define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078) | 7014 | #define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078) |
6965 | #define _MIPIB_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878) | 7015 | #define _MIPIC_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878) |
6966 | #define MIPI_HS_LP_DBI_ENABLE(tc) _TRANSCODER(tc, \ | 7016 | #define MIPI_HS_LP_DBI_ENABLE(port) _MIPI_PORT(port, \ |
6967 | _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE) | 7017 | _MIPIA_HS_LS_DBI_ENABLE, _MIPIC_HS_LS_DBI_ENABLE) |
6968 | #define DBI_HS_LP_MODE_MASK (1 << 0) | 7018 | #define DBI_HS_LP_MODE_MASK (1 << 0) |
6969 | #define DBI_LP_MODE (1 << 0) | 7019 | #define DBI_LP_MODE (1 << 0) |
6970 | #define DBI_HS_MODE (0 << 0) | 7020 | #define DBI_HS_MODE (0 << 0) |
6971 | 7021 | ||
6972 | #define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080) | 7022 | #define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080) |
6973 | #define _MIPIB_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880) | 7023 | #define _MIPIC_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880) |
6974 | #define MIPI_DPHY_PARAM(tc) _TRANSCODER(tc, _MIPIA_DPHY_PARAM, \ | 7024 | #define MIPI_DPHY_PARAM(port) _MIPI_PORT(port, _MIPIA_DPHY_PARAM, \ |
6975 | _MIPIB_DPHY_PARAM) | 7025 | _MIPIC_DPHY_PARAM) |
6976 | #define EXIT_ZERO_COUNT_SHIFT 24 | 7026 | #define EXIT_ZERO_COUNT_SHIFT 24 |
6977 | #define EXIT_ZERO_COUNT_MASK (0x3f << 24) | 7027 | #define EXIT_ZERO_COUNT_MASK (0x3f << 24) |
6978 | #define TRAIL_COUNT_SHIFT 16 | 7028 | #define TRAIL_COUNT_SHIFT 16 |
@@ -6984,36 +7034,36 @@ enum punit_power_well { | |||
6984 | 7034 | ||
6985 | /* bits 31:0 */ | 7035 | /* bits 31:0 */ |
6986 | #define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084) | 7036 | #define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084) |
6987 | #define _MIPIB_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884) | 7037 | #define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884) |
6988 | #define MIPI_DBI_BW_CTRL(tc) _TRANSCODER(tc, _MIPIA_DBI_BW_CTRL, \ | 7038 | #define MIPI_DBI_BW_CTRL(port) _MIPI_PORT(port, _MIPIA_DBI_BW_CTRL, \ |
6989 | _MIPIB_DBI_BW_CTRL) | 7039 | _MIPIC_DBI_BW_CTRL) |
6990 | 7040 | ||
6991 | #define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \ | 7041 | #define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \ |
6992 | + 0xb088) | 7042 | + 0xb088) |
6993 | #define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \ | 7043 | #define _MIPIC_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \ |
6994 | + 0xb888) | 7044 | + 0xb888) |
6995 | #define MIPI_CLK_LANE_SWITCH_TIME_CNT(tc) _TRANSCODER(tc, \ | 7045 | #define MIPI_CLK_LANE_SWITCH_TIME_CNT(port) _MIPI_PORT(port, \ |
6996 | _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT) | 7046 | _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIC_CLK_LANE_SWITCH_TIME_CNT) |
6997 | #define LP_HS_SSW_CNT_SHIFT 16 | 7047 | #define LP_HS_SSW_CNT_SHIFT 16 |
6998 | #define LP_HS_SSW_CNT_MASK (0xffff << 16) | 7048 | #define LP_HS_SSW_CNT_MASK (0xffff << 16) |
6999 | #define HS_LP_PWR_SW_CNT_SHIFT 0 | 7049 | #define HS_LP_PWR_SW_CNT_SHIFT 0 |
7000 | #define HS_LP_PWR_SW_CNT_MASK (0xffff << 0) | 7050 | #define HS_LP_PWR_SW_CNT_MASK (0xffff << 0) |
7001 | 7051 | ||
7002 | #define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c) | 7052 | #define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c) |
7003 | #define _MIPIB_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c) | 7053 | #define _MIPIC_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c) |
7004 | #define MIPI_STOP_STATE_STALL(tc) _TRANSCODER(tc, \ | 7054 | #define MIPI_STOP_STATE_STALL(port) _MIPI_PORT(port, \ |
7005 | _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL) | 7055 | _MIPIA_STOP_STATE_STALL, _MIPIC_STOP_STATE_STALL) |
7006 | #define STOP_STATE_STALL_COUNTER_SHIFT 0 | 7056 | #define STOP_STATE_STALL_COUNTER_SHIFT 0 |
7007 | #define STOP_STATE_STALL_COUNTER_MASK (0xff << 0) | 7057 | #define STOP_STATE_STALL_COUNTER_MASK (0xff << 0) |
7008 | 7058 | ||
7009 | #define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090) | 7059 | #define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090) |
7010 | #define _MIPIB_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890) | 7060 | #define _MIPIC_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890) |
7011 | #define MIPI_INTR_STAT_REG_1(tc) _TRANSCODER(tc, \ | 7061 | #define MIPI_INTR_STAT_REG_1(port) _MIPI_PORT(port, \ |
7012 | _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1) | 7062 | _MIPIA_INTR_STAT_REG_1, _MIPIC_INTR_STAT_REG_1) |
7013 | #define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094) | 7063 | #define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094) |
7014 | #define _MIPIB_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894) | 7064 | #define _MIPIC_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894) |
7015 | #define MIPI_INTR_EN_REG_1(tc) _TRANSCODER(tc, _MIPIA_INTR_EN_REG_1, \ | 7065 | #define MIPI_INTR_EN_REG_1(port) _MIPI_PORT(port, _MIPIA_INTR_EN_REG_1, \ |
7016 | _MIPIB_INTR_EN_REG_1) | 7066 | _MIPIC_INTR_EN_REG_1) |
7017 | #define RX_CONTENTION_DETECTED (1 << 0) | 7067 | #define RX_CONTENTION_DETECTED (1 << 0) |
7018 | 7068 | ||
7019 | /* XXX: only pipe A ?!? */ | 7069 | /* XXX: only pipe A ?!? */ |
@@ -7032,9 +7082,9 @@ enum punit_power_well { | |||
7032 | /* MIPI adapter registers */ | 7082 | /* MIPI adapter registers */ |
7033 | 7083 | ||
7034 | #define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104) | 7084 | #define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104) |
7035 | #define _MIPIB_CTRL (dev_priv->mipi_mmio_base + 0xb904) | 7085 | #define _MIPIC_CTRL (dev_priv->mipi_mmio_base + 0xb904) |
7036 | #define MIPI_CTRL(tc) _TRANSCODER(tc, _MIPIA_CTRL, \ | 7086 | #define MIPI_CTRL(port) _MIPI_PORT(port, _MIPIA_CTRL, \ |
7037 | _MIPIB_CTRL) | 7087 | _MIPIC_CTRL) |
7038 | #define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */ | 7088 | #define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */ |
7039 | #define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5) | 7089 | #define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5) |
7040 | #define ESCAPE_CLOCK_DIVIDER_1 (0 << 5) | 7090 | #define ESCAPE_CLOCK_DIVIDER_1 (0 << 5) |
@@ -7047,24 +7097,24 @@ enum punit_power_well { | |||
7047 | #define RGB_FLIP_TO_BGR (1 << 2) | 7097 | #define RGB_FLIP_TO_BGR (1 << 2) |
7048 | 7098 | ||
7049 | #define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108) | 7099 | #define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108) |
7050 | #define _MIPIB_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908) | 7100 | #define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908) |
7051 | #define MIPI_DATA_ADDRESS(tc) _TRANSCODER(tc, _MIPIA_DATA_ADDRESS, \ | 7101 | #define MIPI_DATA_ADDRESS(port) _MIPI_PORT(port, _MIPIA_DATA_ADDRESS, \ |
7052 | _MIPIB_DATA_ADDRESS) | 7102 | _MIPIC_DATA_ADDRESS) |
7053 | #define DATA_MEM_ADDRESS_SHIFT 5 | 7103 | #define DATA_MEM_ADDRESS_SHIFT 5 |
7054 | #define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5) | 7104 | #define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5) |
7055 | #define DATA_VALID (1 << 0) | 7105 | #define DATA_VALID (1 << 0) |
7056 | 7106 | ||
7057 | #define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c) | 7107 | #define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c) |
7058 | #define _MIPIB_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c) | 7108 | #define _MIPIC_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c) |
7059 | #define MIPI_DATA_LENGTH(tc) _TRANSCODER(tc, _MIPIA_DATA_LENGTH, \ | 7109 | #define MIPI_DATA_LENGTH(port) _MIPI_PORT(port, _MIPIA_DATA_LENGTH, \ |
7060 | _MIPIB_DATA_LENGTH) | 7110 | _MIPIC_DATA_LENGTH) |
7061 | #define DATA_LENGTH_SHIFT 0 | 7111 | #define DATA_LENGTH_SHIFT 0 |
7062 | #define DATA_LENGTH_MASK (0xfffff << 0) | 7112 | #define DATA_LENGTH_MASK (0xfffff << 0) |
7063 | 7113 | ||
7064 | #define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110) | 7114 | #define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110) |
7065 | #define _MIPIB_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910) | 7115 | #define _MIPIC_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910) |
7066 | #define MIPI_COMMAND_ADDRESS(tc) _TRANSCODER(tc, \ | 7116 | #define MIPI_COMMAND_ADDRESS(port) _MIPI_PORT(port, \ |
7067 | _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS) | 7117 | _MIPIA_COMMAND_ADDRESS, _MIPIC_COMMAND_ADDRESS) |
7068 | #define COMMAND_MEM_ADDRESS_SHIFT 5 | 7118 | #define COMMAND_MEM_ADDRESS_SHIFT 5 |
7069 | #define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5) | 7119 | #define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5) |
7070 | #define AUTO_PWG_ENABLE (1 << 2) | 7120 | #define AUTO_PWG_ENABLE (1 << 2) |
@@ -7072,22 +7122,22 @@ enum punit_power_well { | |||
7072 | #define COMMAND_VALID (1 << 0) | 7122 | #define COMMAND_VALID (1 << 0) |
7073 | 7123 | ||
7074 | #define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114) | 7124 | #define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114) |
7075 | #define _MIPIB_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914) | 7125 | #define _MIPIC_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914) |
7076 | #define MIPI_COMMAND_LENGTH(tc) _TRANSCODER(tc, _MIPIA_COMMAND_LENGTH, \ | 7126 | #define MIPI_COMMAND_LENGTH(port) _MIPI_PORT(port, _MIPIA_COMMAND_LENGTH, \ |
7077 | _MIPIB_COMMAND_LENGTH) | 7127 | _MIPIC_COMMAND_LENGTH) |
7078 | #define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */ | 7128 | #define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */ |
7079 | #define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n))) | 7129 | #define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n))) |
7080 | 7130 | ||
7081 | #define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118) | 7131 | #define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118) |
7082 | #define _MIPIB_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918) | 7132 | #define _MIPIC_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918) |
7083 | #define MIPI_READ_DATA_RETURN(tc, n) \ | 7133 | #define MIPI_READ_DATA_RETURN(port, n) \ |
7084 | (_TRANSCODER(tc, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) \ | 7134 | (_MIPI_PORT(port, _MIPIA_READ_DATA_RETURN0, _MIPIC_READ_DATA_RETURN0) \ |
7085 | + 4 * (n)) /* n: 0...7 */ | 7135 | + 4 * (n)) /* n: 0...7 */ |
7086 | 7136 | ||
7087 | #define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138) | 7137 | #define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138) |
7088 | #define _MIPIB_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938) | 7138 | #define _MIPIC_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938) |
7089 | #define MIPI_READ_DATA_VALID(tc) _TRANSCODER(tc, \ | 7139 | #define MIPI_READ_DATA_VALID(port) _MIPI_PORT(port, \ |
7090 | _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID) | 7140 | _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID) |
7091 | #define READ_DATA_VALID(n) (1 << (n)) | 7141 | #define READ_DATA_VALID(n) (1 << (n)) |
7092 | 7142 | ||
7093 | /* For UMS only (deprecated): */ | 7143 | /* For UMS only (deprecated): */ |
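The hunk above renames the second MIPI block from B to C and switches the selector macros from the transcoder-based _TRANSCODER() helper to a port-based _MIPI_PORT(). A minimal sketch of the selector shape these macros assume (illustrative only; the real _MIPI_PORT() definition lives elsewhere in i915_reg.h):

	/* pick the A or C register instance for a two-port MIPI block */
	#define _MIPI_PORT(port, a, c)	((port) == PORT_A ? (a) : (c))	/* ports A and C only */

With that shape, MIPI_CTRL(PORT_A) resolves to the 0xb1xx block and MIPI_CTRL(PORT_C) to the mirrored 0xb9xx block.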
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 26368822a33f..9f19ed38cdc3 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -264,7 +264,7 @@ static void i915_restore_display(struct drm_device *dev) | |||
264 | } | 264 | } |
265 | 265 | ||
266 | /* only restore FBC info on platforms that support FBC */ | 266 | /* only restore FBC info on platforms that support FBC */ |
267 | intel_disable_fbc(dev); | 267 | intel_fbc_disable(dev); |
268 | 268 | ||
269 | /* restore FBC interval */ | 269 | /* restore FBC interval */ |
270 | if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev)) | 270 | if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev)) |
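intel_disable_fbc() becomes intel_fbc_disable(), aligning the suspend path with the intel_fbc_* naming used by the other FBC entry points converted later in this series. As they read after the rename (both names taken from hunks in this patch):

	intel_fbc_update(dev);		/* re-evaluate FBC after a plane or pipe change */
	intel_fbc_disable(dev);		/* tear FBC down, e.g. before restoring state */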
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 751d4ad14d62..6058a01b4443 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h | |||
@@ -328,8 +328,8 @@ TRACE_EVENT(i915_gem_evict_vm, | |||
328 | TRACE_EVENT(i915_gem_ring_sync_to, | 328 | TRACE_EVENT(i915_gem_ring_sync_to, |
329 | TP_PROTO(struct intel_engine_cs *from, | 329 | TP_PROTO(struct intel_engine_cs *from, |
330 | struct intel_engine_cs *to, | 330 | struct intel_engine_cs *to, |
331 | u32 seqno), | 331 | struct drm_i915_gem_request *req), |
332 | TP_ARGS(from, to, seqno), | 332 | TP_ARGS(from, to, req), |
333 | 333 | ||
334 | TP_STRUCT__entry( | 334 | TP_STRUCT__entry( |
335 | __field(u32, dev) | 335 | __field(u32, dev) |
@@ -342,7 +342,7 @@ TRACE_EVENT(i915_gem_ring_sync_to, | |||
342 | __entry->dev = from->dev->primary->index; | 342 | __entry->dev = from->dev->primary->index; |
343 | __entry->sync_from = from->id; | 343 | __entry->sync_from = from->id; |
344 | __entry->sync_to = to->id; | 344 | __entry->sync_to = to->id; |
345 | __entry->seqno = seqno; | 345 | __entry->seqno = i915_gem_request_get_seqno(req); |
346 | ), | 346 | ), |
347 | 347 | ||
348 | TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u", | 348 | TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u", |
@@ -352,8 +352,8 @@ TRACE_EVENT(i915_gem_ring_sync_to, | |||
352 | ); | 352 | ); |
353 | 353 | ||
354 | TRACE_EVENT(i915_gem_ring_dispatch, | 354 | TRACE_EVENT(i915_gem_ring_dispatch, |
355 | TP_PROTO(struct intel_engine_cs *ring, u32 seqno, u32 flags), | 355 | TP_PROTO(struct drm_i915_gem_request *req, u32 flags), |
356 | TP_ARGS(ring, seqno, flags), | 356 | TP_ARGS(req, flags), |
357 | 357 | ||
358 | TP_STRUCT__entry( | 358 | TP_STRUCT__entry( |
359 | __field(u32, dev) | 359 | __field(u32, dev) |
@@ -363,11 +363,13 @@ TRACE_EVENT(i915_gem_ring_dispatch, | |||
363 | ), | 363 | ), |
364 | 364 | ||
365 | TP_fast_assign( | 365 | TP_fast_assign( |
366 | struct intel_engine_cs *ring = | ||
367 | i915_gem_request_get_ring(req); | ||
366 | __entry->dev = ring->dev->primary->index; | 368 | __entry->dev = ring->dev->primary->index; |
367 | __entry->ring = ring->id; | 369 | __entry->ring = ring->id; |
368 | __entry->seqno = seqno; | 370 | __entry->seqno = i915_gem_request_get_seqno(req); |
369 | __entry->flags = flags; | 371 | __entry->flags = flags; |
370 | i915_trace_irq_get(ring, seqno); | 372 | i915_trace_irq_get(ring, req); |
371 | ), | 373 | ), |
372 | 374 | ||
373 | TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", | 375 | TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", |
@@ -398,31 +400,36 @@ TRACE_EVENT(i915_gem_ring_flush, | |||
398 | ); | 400 | ); |
399 | 401 | ||
400 | DECLARE_EVENT_CLASS(i915_gem_request, | 402 | DECLARE_EVENT_CLASS(i915_gem_request, |
401 | TP_PROTO(struct intel_engine_cs *ring, u32 seqno), | 403 | TP_PROTO(struct drm_i915_gem_request *req), |
402 | TP_ARGS(ring, seqno), | 404 | TP_ARGS(req), |
403 | 405 | ||
404 | TP_STRUCT__entry( | 406 | TP_STRUCT__entry( |
405 | __field(u32, dev) | 407 | __field(u32, dev) |
406 | __field(u32, ring) | 408 | __field(u32, ring) |
409 | __field(u32, uniq) | ||
407 | __field(u32, seqno) | 410 | __field(u32, seqno) |
408 | ), | 411 | ), |
409 | 412 | ||
410 | TP_fast_assign( | 413 | TP_fast_assign( |
414 | struct intel_engine_cs *ring = | ||
415 | i915_gem_request_get_ring(req); | ||
411 | __entry->dev = ring->dev->primary->index; | 416 | __entry->dev = ring->dev->primary->index; |
412 | __entry->ring = ring->id; | 417 | __entry->ring = ring->id; |
413 | __entry->seqno = seqno; | 418 | __entry->uniq = req ? req->uniq : 0; |
419 | __entry->seqno = i915_gem_request_get_seqno(req); | ||
414 | ), | 420 | ), |
415 | 421 | ||
416 | TP_printk("dev=%u, ring=%u, seqno=%u", | 422 | TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u", |
417 | __entry->dev, __entry->ring, __entry->seqno) | 423 | __entry->dev, __entry->ring, __entry->uniq, |
424 | __entry->seqno) | ||
418 | ); | 425 | ); |
419 | 426 | ||
420 | DEFINE_EVENT(i915_gem_request, i915_gem_request_add, | 427 | DEFINE_EVENT(i915_gem_request, i915_gem_request_add, |
421 | TP_PROTO(struct intel_engine_cs *ring, u32 seqno), | 428 | TP_PROTO(struct drm_i915_gem_request *req), |
422 | TP_ARGS(ring, seqno) | 429 | TP_ARGS(req) |
423 | ); | 430 | ); |
424 | 431 | ||
425 | TRACE_EVENT(i915_gem_request_complete, | 432 | TRACE_EVENT(i915_gem_request_notify, |
426 | TP_PROTO(struct intel_engine_cs *ring), | 433 | TP_PROTO(struct intel_engine_cs *ring), |
427 | TP_ARGS(ring), | 434 | TP_ARGS(ring), |
428 | 435 | ||
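The request events also gain a 'uniq' field so a request stays identifiable even after seqnos wrap or get reused. A hedged sketch of how uniq is presumably assigned at request creation (the allocation path is outside this diff; only the field name is taken from the hunks above):

	/* assumption: a monotonic per-device counter stamped at allocation */
	req->uniq = dev_priv->request_uniq++;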
@@ -443,17 +450,23 @@ TRACE_EVENT(i915_gem_request_complete, | |||
443 | ); | 450 | ); |
444 | 451 | ||
445 | DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, | 452 | DEFINE_EVENT(i915_gem_request, i915_gem_request_retire, |
446 | TP_PROTO(struct intel_engine_cs *ring, u32 seqno), | 453 | TP_PROTO(struct drm_i915_gem_request *req), |
447 | TP_ARGS(ring, seqno) | 454 | TP_ARGS(req) |
455 | ); | ||
456 | |||
457 | DEFINE_EVENT(i915_gem_request, i915_gem_request_complete, | ||
458 | TP_PROTO(struct drm_i915_gem_request *req), | ||
459 | TP_ARGS(req) | ||
448 | ); | 460 | ); |
449 | 461 | ||
450 | TRACE_EVENT(i915_gem_request_wait_begin, | 462 | TRACE_EVENT(i915_gem_request_wait_begin, |
451 | TP_PROTO(struct intel_engine_cs *ring, u32 seqno), | 463 | TP_PROTO(struct drm_i915_gem_request *req), |
452 | TP_ARGS(ring, seqno), | 464 | TP_ARGS(req), |
453 | 465 | ||
454 | TP_STRUCT__entry( | 466 | TP_STRUCT__entry( |
455 | __field(u32, dev) | 467 | __field(u32, dev) |
456 | __field(u32, ring) | 468 | __field(u32, ring) |
469 | __field(u32, uniq) | ||
457 | __field(u32, seqno) | 470 | __field(u32, seqno) |
458 | __field(bool, blocking) | 471 | __field(bool, blocking) |
459 | ), | 472 | ), |
@@ -465,20 +478,24 @@ TRACE_EVENT(i915_gem_request_wait_begin, | |||
465 | * less desirable. | 478 | * less desirable. |
466 | */ | 479 | */ |
467 | TP_fast_assign( | 480 | TP_fast_assign( |
481 | struct intel_engine_cs *ring = | ||
482 | i915_gem_request_get_ring(req); | ||
468 | __entry->dev = ring->dev->primary->index; | 483 | __entry->dev = ring->dev->primary->index; |
469 | __entry->ring = ring->id; | 484 | __entry->ring = ring->id; |
470 | __entry->seqno = seqno; | 485 | __entry->uniq = req ? req->uniq : 0; |
471 | __entry->blocking = mutex_is_locked(&ring->dev->struct_mutex); | 486 | __entry->seqno = i915_gem_request_get_seqno(req); |
487 | __entry->blocking = | ||
488 | mutex_is_locked(&ring->dev->struct_mutex); | ||
472 | ), | 489 | ), |
473 | 490 | ||
474 | TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s", | 491 | TP_printk("dev=%u, ring=%u, uniq=%u, seqno=%u, blocking=%s", |
475 | __entry->dev, __entry->ring, __entry->seqno, | 492 | __entry->dev, __entry->ring, __entry->uniq, |
476 | __entry->blocking ? "yes (NB)" : "no") | 493 | __entry->seqno, __entry->blocking ? "yes (NB)" : "no") |
477 | ); | 494 | ); |
478 | 495 | ||
479 | DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, | 496 | DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end, |
480 | TP_PROTO(struct intel_engine_cs *ring, u32 seqno), | 497 | TP_PROTO(struct drm_i915_gem_request *req), |
481 | TP_ARGS(ring, seqno) | 498 | TP_ARGS(req) |
482 | ); | 499 | ); |
483 | 500 | ||
484 | DECLARE_EVENT_CLASS(i915_ring, | 501 | DECLARE_EVENT_CLASS(i915_ring, |
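Every converted event now derives ring and seqno from the request rather than taking them as arguments. A minimal sketch of the two accessors this relies on, assuming the usual NULL-tolerant inline helpers (bodies are illustrative; the real ones live in the i915 headers):

	static inline u32
	i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
	{
		return req ? req->seqno : 0;	/* tolerate NULL so tracing never faults */
	}

	static inline struct intel_engine_cs *
	i915_gem_request_get_ring(struct drm_i915_gem_request *req)
	{
		return req ? req->ring : NULL;
	}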
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index a4bd90f36a03..65b1fbc5eb57 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -314,6 +314,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb) | |||
314 | { | 314 | { |
315 | const struct bdb_lfp_backlight_data *backlight_data; | 315 | const struct bdb_lfp_backlight_data *backlight_data; |
316 | const struct bdb_lfp_backlight_data_entry *entry; | 316 | const struct bdb_lfp_backlight_data_entry *entry; |
317 | const struct bdb_lfp_backlight_control_data *bl_ctrl_data; | ||
317 | 318 | ||
318 | backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT); | 319 | backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT); |
319 | if (!backlight_data) | 320 | if (!backlight_data) |
@@ -326,6 +327,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb) | |||
326 | } | 327 | } |
327 | 328 | ||
328 | entry = &backlight_data->data[panel_type]; | 329 | entry = &backlight_data->data[panel_type]; |
330 | bl_ctrl_data = &backlight_data->blc_ctl[panel_type]; | ||
329 | 331 | ||
330 | dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM; | 332 | dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM; |
331 | if (!dev_priv->vbt.backlight.present) { | 333 | if (!dev_priv->vbt.backlight.present) { |
@@ -337,12 +339,30 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb) | |||
337 | dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; | 339 | dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; |
338 | dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; | 340 | dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; |
339 | dev_priv->vbt.backlight.min_brightness = entry->min_brightness; | 341 | dev_priv->vbt.backlight.min_brightness = entry->min_brightness; |
342 | |||
343 | dev_priv->vbt.backlight.controller = 0; | ||
344 | if (bdb->version >= 191) { | ||
345 | dev_priv->vbt.backlight.present = | ||
346 | bl_ctrl_data->pin == BLC_CONTROL_PIN_DDI; | ||
347 | if (!dev_priv->vbt.backlight.present) { | ||
348 | DRM_DEBUG_KMS("BL control pin is not DDI (pin %u)\n", | ||
349 | bl_ctrl_data->pin); | ||
350 | return; | ||
351 | } | ||
352 | if (bl_ctrl_data->controller == 1) | ||
353 | dev_priv->vbt.backlight.controller = | ||
354 | bl_ctrl_data->controller; | ||
355 | } | ||
356 | |||
340 | DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, " | 357 | DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, " |
341 | "active %s, min brightness %u, level %u\n", | 358 | "active %s, min brightness %u, level %u\n", |
342 | dev_priv->vbt.backlight.pwm_freq_hz, | 359 | dev_priv->vbt.backlight.pwm_freq_hz, |
343 | dev_priv->vbt.backlight.active_low_pwm ? "low" : "high", | 360 | dev_priv->vbt.backlight.active_low_pwm ? "low" : "high", |
344 | dev_priv->vbt.backlight.min_brightness, | 361 | dev_priv->vbt.backlight.min_brightness, |
345 | backlight_data->level[panel_type]); | 362 | backlight_data->level[panel_type]); |
363 | |||
364 | DRM_DEBUG_KMS("VBT BL controller %u\n", | ||
365 | dev_priv->vbt.backlight.controller); | ||
346 | } | 366 | } |
347 | 367 | ||
348 | /* Try to find sdvo panel data */ | 368 | /* Try to find sdvo panel data */ |
@@ -664,6 +684,50 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) | |||
664 | } | 684 | } |
665 | } | 685 | } |
666 | 686 | ||
687 | static void | ||
688 | parse_psr(struct drm_i915_private *dev_priv, struct bdb_header *bdb) | ||
689 | { | ||
690 | struct bdb_psr *psr; | ||
691 | struct psr_table *psr_table; | ||
692 | |||
693 | psr = find_section(bdb, BDB_PSR); | ||
694 | if (!psr) { | ||
695 | DRM_DEBUG_KMS("No PSR BDB found.\n"); | ||
696 | return; | ||
697 | } | ||
698 | |||
699 | psr_table = &psr->psr_table[panel_type]; | ||
700 | |||
701 | dev_priv->vbt.psr.full_link = psr_table->full_link; | ||
702 | dev_priv->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup; | ||
703 | |||
704 | /* Allowed VBT values go from 0 to 15 */ | ||
705 | dev_priv->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 : | ||
706 | psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames; | ||
707 | |||
708 | switch (psr_table->lines_to_wait) { | ||
709 | case 0: | ||
710 | dev_priv->vbt.psr.lines_to_wait = PSR_0_LINES_TO_WAIT; | ||
711 | break; | ||
712 | case 1: | ||
713 | dev_priv->vbt.psr.lines_to_wait = PSR_1_LINE_TO_WAIT; | ||
714 | break; | ||
715 | case 2: | ||
716 | dev_priv->vbt.psr.lines_to_wait = PSR_4_LINES_TO_WAIT; | ||
717 | break; | ||
718 | case 3: | ||
719 | dev_priv->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT; | ||
720 | break; | ||
721 | default: | ||
722 | DRM_DEBUG_KMS("VBT has unknown PSR lines to wait %u\n", | ||
723 | psr_table->lines_to_wait); | ||
724 | break; | ||
725 | } | ||
726 | |||
727 | dev_priv->vbt.psr.tp1_wakeup_time = psr_table->tp1_wakeup_time; | ||
728 | dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time; | ||
729 | } | ||
730 | |||
667 | static u8 *goto_next_sequence(u8 *data, int *size) | 731 | static u8 *goto_next_sequence(u8 *data, int *size) |
668 | { | 732 | { |
669 | u16 len; | 733 | u16 len; |
@@ -1241,6 +1305,7 @@ intel_parse_bios(struct drm_device *dev) | |||
1241 | parse_device_mapping(dev_priv, bdb); | 1305 | parse_device_mapping(dev_priv, bdb); |
1242 | parse_driver_features(dev_priv, bdb); | 1306 | parse_driver_features(dev_priv, bdb); |
1243 | parse_edp(dev_priv, bdb); | 1307 | parse_edp(dev_priv, bdb); |
1308 | parse_psr(dev_priv, bdb); | ||
1244 | parse_mipi(dev_priv, bdb); | 1309 | parse_mipi(dev_priv, bdb); |
1245 | parse_ddi_ports(dev_priv, bdb); | 1310 | parse_ddi_ports(dev_priv, bdb); |
1246 | 1311 | ||
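The idle_frames clamp in parse_psr() is written as a nested ternary; the kernel's clamp_t() helper from <linux/kernel.h> expresses the same bound more directly. A sketch of the equivalent assignment (illustrative alternative, not what the patch applies):

	dev_priv->vbt.psr.idle_frames =
		clamp_t(int, psr_table->idle_frames, 0, 15);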
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 7603765c91fc..9a7202e5caf4 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h | |||
@@ -80,7 +80,7 @@ struct vbios_data { | |||
80 | #define BDB_EXT_MMIO_REGS 6 | 80 | #define BDB_EXT_MMIO_REGS 6 |
81 | #define BDB_SWF_IO 7 | 81 | #define BDB_SWF_IO 7 |
82 | #define BDB_SWF_MMIO 8 | 82 | #define BDB_SWF_MMIO 8 |
83 | #define BDB_DOT_CLOCK_TABLE 9 | 83 | #define BDB_PSR 9 |
84 | #define BDB_MODE_REMOVAL_TABLE 10 | 84 | #define BDB_MODE_REMOVAL_TABLE 10 |
85 | #define BDB_CHILD_DEVICE_TABLE 11 | 85 | #define BDB_CHILD_DEVICE_TABLE 11 |
86 | #define BDB_DRIVER_FEATURES 12 | 86 | #define BDB_DRIVER_FEATURES 12 |
@@ -402,10 +402,21 @@ struct bdb_lfp_backlight_data_entry { | |||
402 | u8 obsolete3; | 402 | u8 obsolete3; |
403 | } __packed; | 403 | } __packed; |
404 | 404 | ||
405 | #define BLC_CONTROL_PIN_PMIC 0 | ||
406 | #define BLC_CONTROL_PIN_LPSS_PWM 1 | ||
407 | #define BLC_CONTROL_PIN_DDI 2 | ||
408 | #define BLC_CONTROL_PIN_CABC 3 | ||
409 | |||
410 | struct bdb_lfp_backlight_control_data { | ||
411 | u8 controller:4; | ||
412 | u8 pin:4; | ||
413 | } __packed; | ||
414 | |||
405 | struct bdb_lfp_backlight_data { | 415 | struct bdb_lfp_backlight_data { |
406 | u8 entry_size; | 416 | u8 entry_size; |
407 | struct bdb_lfp_backlight_data_entry data[16]; | 417 | struct bdb_lfp_backlight_data_entry data[16]; |
408 | u8 level[16]; | 418 | u8 level[16]; |
419 | struct bdb_lfp_backlight_control_data blc_ctl[16]; | ||
409 | } __packed; | 420 | } __packed; |
410 | 421 | ||
411 | struct aimdb_header { | 422 | struct aimdb_header { |
@@ -556,6 +567,26 @@ struct bdb_edp { | |||
556 | u16 edp_t3_optimization; | 567 | u16 edp_t3_optimization; |
557 | } __packed; | 568 | } __packed; |
558 | 569 | ||
570 | struct psr_table { | ||
571 | /* Feature bits */ | ||
572 | u8 full_link:1; | ||
573 | u8 require_aux_to_wakeup:1; | ||
574 | u8 feature_bits_rsvd:6; | ||
575 | |||
576 | /* Wait times */ | ||
577 | u8 idle_frames:4; | ||
578 | u8 lines_to_wait:3; | ||
579 | u8 wait_times_rsvd:1; | ||
580 | |||
581 | /* TP wake up time in multiples of 100 */ | ||
582 | u16 tp1_wakeup_time; | ||
583 | u16 tp2_tp3_wakeup_time; | ||
584 | } __packed; | ||
585 | |||
586 | struct bdb_psr { | ||
587 | struct psr_table psr_table[16]; | ||
588 | } __packed; | ||
589 | |||
559 | void intel_setup_bios(struct drm_device *dev); | 590 | void intel_setup_bios(struct drm_device *dev); |
560 | int intel_parse_bios(struct drm_device *dev); | 591 | int intel_parse_bios(struct drm_device *dev); |
561 | 592 | ||
@@ -798,7 +829,8 @@ struct mipi_config { | |||
798 | #define DUAL_LINK_PIXEL_ALT 2 | 829 | #define DUAL_LINK_PIXEL_ALT 2 |
799 | u16 dual_link:2; | 830 | u16 dual_link:2; |
800 | u16 lane_cnt:2; | 831 | u16 lane_cnt:2; |
801 | u16 rsvd3:12; | 832 | u16 pixel_overlap:3; |
833 | u16 rsvd3:9; | ||
802 | 834 | ||
803 | u16 rsvd4; | 835 | u16 rsvd4; |
804 | 836 | ||
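The new VBT structures are declared __packed, so each bdb_lfp_backlight_control_data entry is a single byte (controller in the low nibble, pin in the high nibble, given the usual little-endian bitfield layout), and a psr_table entry is six bytes. Compile-time checks one could add as a sketch (assumption: C11 _Static_assert is acceptable here):

	_Static_assert(sizeof(struct bdb_lfp_backlight_control_data) == 1,
		       "one packed byte per panel entry");
	_Static_assert(sizeof(struct psr_table) == 6,
		       "two feature/wait bytes plus two u16 wakeup times");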
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index e6b45cd150d3..1c92ad47502b 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -128,15 +128,15 @@ static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = { | |||
128 | }; | 128 | }; |
129 | 129 | ||
130 | static const struct ddi_buf_trans skl_ddi_translations_dp[] = { | 130 | static const struct ddi_buf_trans skl_ddi_translations_dp[] = { |
131 | { 0x00000018, 0x000000a0 }, | 131 | { 0x00000018, 0x000000a2 }, |
132 | { 0x00004014, 0x00000098 }, | 132 | { 0x00004014, 0x0000009B }, |
133 | { 0x00006012, 0x00000088 }, | 133 | { 0x00006012, 0x00000088 }, |
134 | { 0x00008010, 0x00000080 }, | 134 | { 0x00008010, 0x00000087 }, |
135 | { 0x00000018, 0x00000098 }, | 135 | { 0x00000018, 0x0000009B }, |
136 | { 0x00004014, 0x00000088 }, | 136 | { 0x00004014, 0x00000088 }, |
137 | { 0x00006012, 0x00000080 }, | 137 | { 0x00006012, 0x00000087 }, |
138 | { 0x00000018, 0x00000088 }, | 138 | { 0x00000018, 0x00000088 }, |
139 | { 0x00004014, 0x00000080 }, | 139 | { 0x00004014, 0x00000087 }, |
140 | }; | 140 | }; |
141 | 141 | ||
142 | static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = { | 142 | static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = { |
@@ -834,7 +834,12 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder, | |||
834 | void intel_ddi_clock_get(struct intel_encoder *encoder, | 834 | void intel_ddi_clock_get(struct intel_encoder *encoder, |
835 | struct intel_crtc_config *pipe_config) | 835 | struct intel_crtc_config *pipe_config) |
836 | { | 836 | { |
837 | hsw_ddi_clock_get(encoder, pipe_config); | 837 | struct drm_device *dev = encoder->base.dev; |
838 | |||
839 | if (INTEL_INFO(dev)->gen <= 8) | ||
840 | hsw_ddi_clock_get(encoder, pipe_config); | ||
841 | else | ||
842 | skl_ddi_clock_get(encoder, pipe_config); | ||
838 | } | 843 | } |
839 | 844 | ||
840 | static void | 845 | static void |
@@ -2029,7 +2034,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder, | |||
2029 | enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; | 2034 | enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; |
2030 | struct intel_hdmi *intel_hdmi; | 2035 | struct intel_hdmi *intel_hdmi; |
2031 | u32 temp, flags = 0; | 2036 | u32 temp, flags = 0; |
2032 | struct drm_device *dev = dev_priv->dev; | ||
2033 | 2037 | ||
2034 | temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); | 2038 | temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); |
2035 | if (temp & TRANS_DDI_PHSYNC) | 2039 | if (temp & TRANS_DDI_PHSYNC) |
@@ -2106,10 +2110,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder, | |||
2106 | dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; | 2110 | dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; |
2107 | } | 2111 | } |
2108 | 2112 | ||
2109 | if (INTEL_INFO(dev)->gen <= 8) | 2113 | intel_ddi_clock_get(encoder, pipe_config); |
2110 | hsw_ddi_clock_get(encoder, pipe_config); | ||
2111 | else | ||
2112 | skl_ddi_clock_get(encoder, pipe_config); | ||
2113 | } | 2114 | } |
2114 | 2115 | ||
2115 | static void intel_ddi_destroy(struct drm_encoder *encoder) | 2116 | static void intel_ddi_destroy(struct drm_encoder *encoder) |
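With the gen check folded into intel_ddi_clock_get(), callers such as intel_ddi_get_config() no longer choose between the HSW/BDW and SKL paths themselves; the call site reduces to:

	intel_ddi_clock_get(encoder, pipe_config);	/* dispatches to the hsw_ or skl_ variant */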
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index fb3e3d429191..d01db1b82869 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -1024,7 +1024,7 @@ void assert_pll(struct drm_i915_private *dev_priv, | |||
1024 | reg = DPLL(pipe); | 1024 | reg = DPLL(pipe); |
1025 | val = I915_READ(reg); | 1025 | val = I915_READ(reg); |
1026 | cur_state = !!(val & DPLL_VCO_ENABLE); | 1026 | cur_state = !!(val & DPLL_VCO_ENABLE); |
1027 | WARN(cur_state != state, | 1027 | I915_STATE_WARN(cur_state != state, |
1028 | "PLL state assertion failure (expected %s, current %s)\n", | 1028 | "PLL state assertion failure (expected %s, current %s)\n", |
1029 | state_string(state), state_string(cur_state)); | 1029 | state_string(state), state_string(cur_state)); |
1030 | } | 1030 | } |
@@ -1040,7 +1040,7 @@ static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state) | |||
1040 | mutex_unlock(&dev_priv->dpio_lock); | 1040 | mutex_unlock(&dev_priv->dpio_lock); |
1041 | 1041 | ||
1042 | cur_state = val & DSI_PLL_VCO_EN; | 1042 | cur_state = val & DSI_PLL_VCO_EN; |
1043 | WARN(cur_state != state, | 1043 | I915_STATE_WARN(cur_state != state, |
1044 | "DSI PLL state assertion failure (expected %s, current %s)\n", | 1044 | "DSI PLL state assertion failure (expected %s, current %s)\n", |
1045 | state_string(state), state_string(cur_state)); | 1045 | state_string(state), state_string(cur_state)); |
1046 | } | 1046 | } |
@@ -1071,7 +1071,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, | |||
1071 | return; | 1071 | return; |
1072 | 1072 | ||
1073 | cur_state = pll->get_hw_state(dev_priv, pll, &hw_state); | 1073 | cur_state = pll->get_hw_state(dev_priv, pll, &hw_state); |
1074 | WARN(cur_state != state, | 1074 | I915_STATE_WARN(cur_state != state, |
1075 | "%s assertion failure (expected %s, current %s)\n", | 1075 | "%s assertion failure (expected %s, current %s)\n", |
1076 | pll->name, state_string(state), state_string(cur_state)); | 1076 | pll->name, state_string(state), state_string(cur_state)); |
1077 | } | 1077 | } |
@@ -1095,7 +1095,7 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv, | |||
1095 | val = I915_READ(reg); | 1095 | val = I915_READ(reg); |
1096 | cur_state = !!(val & FDI_TX_ENABLE); | 1096 | cur_state = !!(val & FDI_TX_ENABLE); |
1097 | } | 1097 | } |
1098 | WARN(cur_state != state, | 1098 | I915_STATE_WARN(cur_state != state, |
1099 | "FDI TX state assertion failure (expected %s, current %s)\n", | 1099 | "FDI TX state assertion failure (expected %s, current %s)\n", |
1100 | state_string(state), state_string(cur_state)); | 1100 | state_string(state), state_string(cur_state)); |
1101 | } | 1101 | } |
@@ -1112,7 +1112,7 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv, | |||
1112 | reg = FDI_RX_CTL(pipe); | 1112 | reg = FDI_RX_CTL(pipe); |
1113 | val = I915_READ(reg); | 1113 | val = I915_READ(reg); |
1114 | cur_state = !!(val & FDI_RX_ENABLE); | 1114 | cur_state = !!(val & FDI_RX_ENABLE); |
1115 | WARN(cur_state != state, | 1115 | I915_STATE_WARN(cur_state != state, |
1116 | "FDI RX state assertion failure (expected %s, current %s)\n", | 1116 | "FDI RX state assertion failure (expected %s, current %s)\n", |
1117 | state_string(state), state_string(cur_state)); | 1117 | state_string(state), state_string(cur_state)); |
1118 | } | 1118 | } |
@@ -1135,7 +1135,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, | |||
1135 | 1135 | ||
1136 | reg = FDI_TX_CTL(pipe); | 1136 | reg = FDI_TX_CTL(pipe); |
1137 | val = I915_READ(reg); | 1137 | val = I915_READ(reg); |
1138 | WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); | 1138 | I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); |
1139 | } | 1139 | } |
1140 | 1140 | ||
1141 | void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, | 1141 | void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, |
@@ -1148,7 +1148,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, | |||
1148 | reg = FDI_RX_CTL(pipe); | 1148 | reg = FDI_RX_CTL(pipe); |
1149 | val = I915_READ(reg); | 1149 | val = I915_READ(reg); |
1150 | cur_state = !!(val & FDI_RX_PLL_ENABLE); | 1150 | cur_state = !!(val & FDI_RX_PLL_ENABLE); |
1151 | WARN(cur_state != state, | 1151 | I915_STATE_WARN(cur_state != state, |
1152 | "FDI RX PLL assertion failure (expected %s, current %s)\n", | 1152 | "FDI RX PLL assertion failure (expected %s, current %s)\n", |
1153 | state_string(state), state_string(cur_state)); | 1153 | state_string(state), state_string(cur_state)); |
1154 | } | 1154 | } |
@@ -1190,7 +1190,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, | |||
1190 | ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) | 1190 | ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) |
1191 | locked = false; | 1191 | locked = false; |
1192 | 1192 | ||
1193 | WARN(panel_pipe == pipe && locked, | 1193 | I915_STATE_WARN(panel_pipe == pipe && locked, |
1194 | "panel assertion failure, pipe %c regs locked\n", | 1194 | "panel assertion failure, pipe %c regs locked\n", |
1195 | pipe_name(pipe)); | 1195 | pipe_name(pipe)); |
1196 | } | 1196 | } |
@@ -1206,7 +1206,7 @@ static void assert_cursor(struct drm_i915_private *dev_priv, | |||
1206 | else | 1206 | else |
1207 | cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; | 1207 | cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; |
1208 | 1208 | ||
1209 | WARN(cur_state != state, | 1209 | I915_STATE_WARN(cur_state != state, |
1210 | "cursor on pipe %c assertion failure (expected %s, current %s)\n", | 1210 | "cursor on pipe %c assertion failure (expected %s, current %s)\n", |
1211 | pipe_name(pipe), state_string(state), state_string(cur_state)); | 1211 | pipe_name(pipe), state_string(state), state_string(cur_state)); |
1212 | } | 1212 | } |
@@ -1236,7 +1236,7 @@ void assert_pipe(struct drm_i915_private *dev_priv, | |||
1236 | cur_state = !!(val & PIPECONF_ENABLE); | 1236 | cur_state = !!(val & PIPECONF_ENABLE); |
1237 | } | 1237 | } |
1238 | 1238 | ||
1239 | WARN(cur_state != state, | 1239 | I915_STATE_WARN(cur_state != state, |
1240 | "pipe %c assertion failure (expected %s, current %s)\n", | 1240 | "pipe %c assertion failure (expected %s, current %s)\n", |
1241 | pipe_name(pipe), state_string(state), state_string(cur_state)); | 1241 | pipe_name(pipe), state_string(state), state_string(cur_state)); |
1242 | } | 1242 | } |
@@ -1251,7 +1251,7 @@ static void assert_plane(struct drm_i915_private *dev_priv, | |||
1251 | reg = DSPCNTR(plane); | 1251 | reg = DSPCNTR(plane); |
1252 | val = I915_READ(reg); | 1252 | val = I915_READ(reg); |
1253 | cur_state = !!(val & DISPLAY_PLANE_ENABLE); | 1253 | cur_state = !!(val & DISPLAY_PLANE_ENABLE); |
1254 | WARN(cur_state != state, | 1254 | I915_STATE_WARN(cur_state != state, |
1255 | "plane %c assertion failure (expected %s, current %s)\n", | 1255 | "plane %c assertion failure (expected %s, current %s)\n", |
1256 | plane_name(plane), state_string(state), state_string(cur_state)); | 1256 | plane_name(plane), state_string(state), state_string(cur_state)); |
1257 | } | 1257 | } |
@@ -1271,7 +1271,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv, | |||
1271 | if (INTEL_INFO(dev)->gen >= 4) { | 1271 | if (INTEL_INFO(dev)->gen >= 4) { |
1272 | reg = DSPCNTR(pipe); | 1272 | reg = DSPCNTR(pipe); |
1273 | val = I915_READ(reg); | 1273 | val = I915_READ(reg); |
1274 | WARN(val & DISPLAY_PLANE_ENABLE, | 1274 | I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE, |
1275 | "plane %c assertion failure, should be disabled but not\n", | 1275 | "plane %c assertion failure, should be disabled but not\n", |
1276 | plane_name(pipe)); | 1276 | plane_name(pipe)); |
1277 | return; | 1277 | return; |
@@ -1283,7 +1283,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv, | |||
1283 | val = I915_READ(reg); | 1283 | val = I915_READ(reg); |
1284 | cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> | 1284 | cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> |
1285 | DISPPLANE_SEL_PIPE_SHIFT; | 1285 | DISPPLANE_SEL_PIPE_SHIFT; |
1286 | WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, | 1286 | I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, |
1287 | "plane %c assertion failure, should be off on pipe %c but is still active\n", | 1287 | "plane %c assertion failure, should be off on pipe %c but is still active\n", |
1288 | plane_name(i), pipe_name(pipe)); | 1288 | plane_name(i), pipe_name(pipe)); |
1289 | } | 1289 | } |
@@ -1299,7 +1299,7 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv, | |||
1299 | if (INTEL_INFO(dev)->gen >= 9) { | 1299 | if (INTEL_INFO(dev)->gen >= 9) { |
1300 | for_each_sprite(pipe, sprite) { | 1300 | for_each_sprite(pipe, sprite) { |
1301 | val = I915_READ(PLANE_CTL(pipe, sprite)); | 1301 | val = I915_READ(PLANE_CTL(pipe, sprite)); |
1302 | WARN(val & PLANE_CTL_ENABLE, | 1302 | I915_STATE_WARN(val & PLANE_CTL_ENABLE, |
1303 | "plane %d assertion failure, should be off on pipe %c but is still active\n", | 1303 | "plane %d assertion failure, should be off on pipe %c but is still active\n", |
1304 | sprite, pipe_name(pipe)); | 1304 | sprite, pipe_name(pipe)); |
1305 | } | 1305 | } |
@@ -1307,20 +1307,20 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv, | |||
1307 | for_each_sprite(pipe, sprite) { | 1307 | for_each_sprite(pipe, sprite) { |
1308 | reg = SPCNTR(pipe, sprite); | 1308 | reg = SPCNTR(pipe, sprite); |
1309 | val = I915_READ(reg); | 1309 | val = I915_READ(reg); |
1310 | WARN(val & SP_ENABLE, | 1310 | I915_STATE_WARN(val & SP_ENABLE, |
1311 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", | 1311 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", |
1312 | sprite_name(pipe, sprite), pipe_name(pipe)); | 1312 | sprite_name(pipe, sprite), pipe_name(pipe)); |
1313 | } | 1313 | } |
1314 | } else if (INTEL_INFO(dev)->gen >= 7) { | 1314 | } else if (INTEL_INFO(dev)->gen >= 7) { |
1315 | reg = SPRCTL(pipe); | 1315 | reg = SPRCTL(pipe); |
1316 | val = I915_READ(reg); | 1316 | val = I915_READ(reg); |
1317 | WARN(val & SPRITE_ENABLE, | 1317 | I915_STATE_WARN(val & SPRITE_ENABLE, |
1318 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", | 1318 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", |
1319 | plane_name(pipe), pipe_name(pipe)); | 1319 | plane_name(pipe), pipe_name(pipe)); |
1320 | } else if (INTEL_INFO(dev)->gen >= 5) { | 1320 | } else if (INTEL_INFO(dev)->gen >= 5) { |
1321 | reg = DVSCNTR(pipe); | 1321 | reg = DVSCNTR(pipe); |
1322 | val = I915_READ(reg); | 1322 | val = I915_READ(reg); |
1323 | WARN(val & DVS_ENABLE, | 1323 | I915_STATE_WARN(val & DVS_ENABLE, |
1324 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", | 1324 | "sprite %c assertion failure, should be off on pipe %c but is still active\n", |
1325 | plane_name(pipe), pipe_name(pipe)); | 1325 | plane_name(pipe), pipe_name(pipe)); |
1326 | } | 1326 | } |
@@ -1328,7 +1328,7 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv, | |||
1328 | 1328 | ||
1329 | static void assert_vblank_disabled(struct drm_crtc *crtc) | 1329 | static void assert_vblank_disabled(struct drm_crtc *crtc) |
1330 | { | 1330 | { |
1331 | if (WARN_ON(drm_crtc_vblank_get(crtc) == 0)) | 1331 | if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0)) |
1332 | drm_crtc_vblank_put(crtc); | 1332 | drm_crtc_vblank_put(crtc); |
1333 | } | 1333 | } |
1334 | 1334 | ||
@@ -1337,12 +1337,12 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) | |||
1337 | u32 val; | 1337 | u32 val; |
1338 | bool enabled; | 1338 | bool enabled; |
1339 | 1339 | ||
1340 | WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev))); | 1340 | I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev))); |
1341 | 1341 | ||
1342 | val = I915_READ(PCH_DREF_CONTROL); | 1342 | val = I915_READ(PCH_DREF_CONTROL); |
1343 | enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | | 1343 | enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | |
1344 | DREF_SUPERSPREAD_SOURCE_MASK)); | 1344 | DREF_SUPERSPREAD_SOURCE_MASK)); |
1345 | WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); | 1345 | I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); |
1346 | } | 1346 | } |
1347 | 1347 | ||
1348 | static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, | 1348 | static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, |
@@ -1355,7 +1355,7 @@ static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, | |||
1355 | reg = PCH_TRANSCONF(pipe); | 1355 | reg = PCH_TRANSCONF(pipe); |
1356 | val = I915_READ(reg); | 1356 | val = I915_READ(reg); |
1357 | enabled = !!(val & TRANS_ENABLE); | 1357 | enabled = !!(val & TRANS_ENABLE); |
1358 | WARN(enabled, | 1358 | I915_STATE_WARN(enabled, |
1359 | "transcoder assertion failed, should be off on pipe %c but is still active\n", | 1359 | "transcoder assertion failed, should be off on pipe %c but is still active\n", |
1360 | pipe_name(pipe)); | 1360 | pipe_name(pipe)); |
1361 | } | 1361 | } |
@@ -1435,11 +1435,11 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, | |||
1435 | enum pipe pipe, int reg, u32 port_sel) | 1435 | enum pipe pipe, int reg, u32 port_sel) |
1436 | { | 1436 | { |
1437 | u32 val = I915_READ(reg); | 1437 | u32 val = I915_READ(reg); |
1438 | WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), | 1438 | I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), |
1439 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", | 1439 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", |
1440 | reg, pipe_name(pipe)); | 1440 | reg, pipe_name(pipe)); |
1441 | 1441 | ||
1442 | WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0 | 1442 | I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0 |
1443 | && (val & DP_PIPEB_SELECT), | 1443 | && (val & DP_PIPEB_SELECT), |
1444 | "IBX PCH dp port still using transcoder B\n"); | 1444 | "IBX PCH dp port still using transcoder B\n"); |
1445 | } | 1445 | } |
@@ -1448,11 +1448,11 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, | |||
1448 | enum pipe pipe, int reg) | 1448 | enum pipe pipe, int reg) |
1449 | { | 1449 | { |
1450 | u32 val = I915_READ(reg); | 1450 | u32 val = I915_READ(reg); |
1451 | WARN(hdmi_pipe_enabled(dev_priv, pipe, val), | 1451 | I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val), |
1452 | "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", | 1452 | "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", |
1453 | reg, pipe_name(pipe)); | 1453 | reg, pipe_name(pipe)); |
1454 | 1454 | ||
1455 | WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0 | 1455 | I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0 |
1456 | && (val & SDVO_PIPE_B_SELECT), | 1456 | && (val & SDVO_PIPE_B_SELECT), |
1457 | "IBX PCH hdmi port still using transcoder B\n"); | 1457 | "IBX PCH hdmi port still using transcoder B\n"); |
1458 | } | 1458 | } |
@@ -1469,13 +1469,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, | |||
1469 | 1469 | ||
1470 | reg = PCH_ADPA; | 1470 | reg = PCH_ADPA; |
1471 | val = I915_READ(reg); | 1471 | val = I915_READ(reg); |
1472 | WARN(adpa_pipe_enabled(dev_priv, pipe, val), | 1472 | I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val), |
1473 | "PCH VGA enabled on transcoder %c, should be disabled\n", | 1473 | "PCH VGA enabled on transcoder %c, should be disabled\n", |
1474 | pipe_name(pipe)); | 1474 | pipe_name(pipe)); |
1475 | 1475 | ||
1476 | reg = PCH_LVDS; | 1476 | reg = PCH_LVDS; |
1477 | val = I915_READ(reg); | 1477 | val = I915_READ(reg); |
1478 | WARN(lvds_pipe_enabled(dev_priv, pipe, val), | 1478 | I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val), |
1479 | "PCH LVDS enabled on transcoder %c, should be disabled\n", | 1479 | "PCH LVDS enabled on transcoder %c, should be disabled\n", |
1480 | pipe_name(pipe)); | 1480 | pipe_name(pipe)); |
1481 | 1481 | ||
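The assert_* helpers above switch from bare WARN()/WARN_ON() to I915_STATE_WARN()/I915_STATE_WARN_ON(), scoping these reports to display state checks. A hedged sketch of the macro family, assuming a one-to-one WARN() wrapper (the real definitions may instead downgrade to a plain error message when verbose state checks are off):

	#define I915_STATE_WARN(condition, format...)	WARN(condition, format)
	#define I915_STATE_WARN_ON(x)	I915_STATE_WARN((x), "WARN_ON(" #x ")")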
@@ -2954,71 +2954,6 @@ static void intel_update_pipe_size(struct intel_crtc *crtc) | |||
2954 | crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay; | 2954 | crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay; |
2955 | } | 2955 | } |
2956 | 2956 | ||
2957 | static int | ||
2958 | intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | ||
2959 | struct drm_framebuffer *fb) | ||
2960 | { | ||
2961 | struct drm_device *dev = crtc->dev; | ||
2962 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2963 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2964 | enum pipe pipe = intel_crtc->pipe; | ||
2965 | struct drm_framebuffer *old_fb = crtc->primary->fb; | ||
2966 | struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb); | ||
2967 | int ret; | ||
2968 | |||
2969 | if (intel_crtc_has_pending_flip(crtc)) { | ||
2970 | DRM_ERROR("pipe is still busy with an old pageflip\n"); | ||
2971 | return -EBUSY; | ||
2972 | } | ||
2973 | |||
2974 | /* no fb bound */ | ||
2975 | if (!fb) { | ||
2976 | DRM_ERROR("No FB bound\n"); | ||
2977 | return 0; | ||
2978 | } | ||
2979 | |||
2980 | if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) { | ||
2981 | DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n", | ||
2982 | plane_name(intel_crtc->plane), | ||
2983 | INTEL_INFO(dev)->num_pipes); | ||
2984 | return -EINVAL; | ||
2985 | } | ||
2986 | |||
2987 | mutex_lock(&dev->struct_mutex); | ||
2988 | ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL); | ||
2989 | if (ret == 0) | ||
2990 | i915_gem_track_fb(old_obj, intel_fb_obj(fb), | ||
2991 | INTEL_FRONTBUFFER_PRIMARY(pipe)); | ||
2992 | mutex_unlock(&dev->struct_mutex); | ||
2993 | if (ret != 0) { | ||
2994 | DRM_ERROR("pin & fence failed\n"); | ||
2995 | return ret; | ||
2996 | } | ||
2997 | |||
2998 | dev_priv->display.update_primary_plane(crtc, fb, x, y); | ||
2999 | |||
3000 | if (intel_crtc->active) | ||
3001 | intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); | ||
3002 | |||
3003 | crtc->primary->fb = fb; | ||
3004 | crtc->x = x; | ||
3005 | crtc->y = y; | ||
3006 | |||
3007 | if (old_fb) { | ||
3008 | if (intel_crtc->active && old_fb != fb) | ||
3009 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
3010 | mutex_lock(&dev->struct_mutex); | ||
3011 | intel_unpin_fb_obj(old_obj); | ||
3012 | mutex_unlock(&dev->struct_mutex); | ||
3013 | } | ||
3014 | |||
3015 | mutex_lock(&dev->struct_mutex); | ||
3016 | intel_update_fbc(dev); | ||
3017 | mutex_unlock(&dev->struct_mutex); | ||
3018 | |||
3019 | return 0; | ||
3020 | } | ||
3021 | |||
3022 | static void intel_fdi_normal_train(struct drm_crtc *crtc) | 2957 | static void intel_fdi_normal_train(struct drm_crtc *crtc) |
3023 | { | 2958 | { |
3024 | struct drm_device *dev = crtc->dev; | 2959 | struct drm_device *dev = crtc->dev; |
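intel_pipe_set_base() can be deleted because the primary plane is now a proper drm_plane whose ops subsume the pin/track/flip/unpin sequence above. A call-site sketch under that assumption, using the standard drm_plane_funcs contract (integer CRTC coordinates, 16.16 fixed-point source coordinates):

	crtc->primary->funcs->update_plane(crtc->primary, crtc, fb,
					   0, 0, fb->width, fb->height,
					   x << 16, y << 16,
					   fb->width << 16, fb->height << 16);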
@@ -4125,7 +4060,7 @@ static void intel_disable_planes(struct drm_crtc *crtc) | |||
4125 | drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { | 4060 | drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { |
4126 | intel_plane = to_intel_plane(plane); | 4061 | intel_plane = to_intel_plane(plane); |
4127 | if (intel_plane->pipe == pipe) | 4062 | if (intel_plane->pipe == pipe) |
4128 | intel_plane_disable(&intel_plane->base); | 4063 | plane->funcs->disable_plane(plane); |
4129 | } | 4064 | } |
4130 | } | 4065 | } |
4131 | 4066 | ||
@@ -4266,7 +4201,7 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc) | |||
4266 | hsw_enable_ips(intel_crtc); | 4201 | hsw_enable_ips(intel_crtc); |
4267 | 4202 | ||
4268 | mutex_lock(&dev->struct_mutex); | 4203 | mutex_lock(&dev->struct_mutex); |
4269 | intel_update_fbc(dev); | 4204 | intel_fbc_update(dev); |
4270 | mutex_unlock(&dev->struct_mutex); | 4205 | mutex_unlock(&dev->struct_mutex); |
4271 | 4206 | ||
4272 | /* | 4207 | /* |
@@ -4288,7 +4223,7 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc) | |||
4288 | intel_crtc_wait_for_pending_flips(crtc); | 4223 | intel_crtc_wait_for_pending_flips(crtc); |
4289 | 4224 | ||
4290 | if (dev_priv->fbc.plane == plane) | 4225 | if (dev_priv->fbc.plane == plane) |
4291 | intel_disable_fbc(dev); | 4226 | intel_fbc_disable(dev); |
4292 | 4227 | ||
4293 | hsw_disable_ips(intel_crtc); | 4228 | hsw_disable_ips(intel_crtc); |
4294 | 4229 | ||
@@ -4591,7 +4526,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
4591 | intel_update_watermarks(crtc); | 4526 | intel_update_watermarks(crtc); |
4592 | 4527 | ||
4593 | mutex_lock(&dev->struct_mutex); | 4528 | mutex_lock(&dev->struct_mutex); |
4594 | intel_update_fbc(dev); | 4529 | intel_fbc_update(dev); |
4595 | mutex_unlock(&dev->struct_mutex); | 4530 | mutex_unlock(&dev->struct_mutex); |
4596 | } | 4531 | } |
4597 | 4532 | ||
@@ -4646,7 +4581,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) | |||
4646 | intel_update_watermarks(crtc); | 4581 | intel_update_watermarks(crtc); |
4647 | 4582 | ||
4648 | mutex_lock(&dev->struct_mutex); | 4583 | mutex_lock(&dev->struct_mutex); |
4649 | intel_update_fbc(dev); | 4584 | intel_fbc_update(dev); |
4650 | mutex_unlock(&dev->struct_mutex); | 4585 | mutex_unlock(&dev->struct_mutex); |
4651 | 4586 | ||
4652 | if (intel_crtc_to_shared_dpll(intel_crtc)) | 4587 | if (intel_crtc_to_shared_dpll(intel_crtc)) |
@@ -4909,7 +4844,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk) | |||
4909 | cmd = 0; | 4844 | cmd = 0; |
4910 | break; | 4845 | break; |
4911 | default: | 4846 | default: |
4912 | WARN_ON(1); | 4847 | MISSING_CASE(cdclk); |
4913 | return; | 4848 | return; |
4914 | } | 4849 | } |
4915 | 4850 | ||
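The unreachable default: arms move from WARN_ON(1) to MISSING_CASE(), which also reports the value that fell through. A sketch of what the macro presumably expands to (illustrative; the real definition sits in the i915 headers):

	#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
				     (long)(x), __func__)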
@@ -5251,7 +5186,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) | |||
5251 | intel_update_watermarks(crtc); | 5186 | intel_update_watermarks(crtc); |
5252 | 5187 | ||
5253 | mutex_lock(&dev->struct_mutex); | 5188 | mutex_lock(&dev->struct_mutex); |
5254 | intel_update_fbc(dev); | 5189 | intel_fbc_update(dev); |
5255 | mutex_unlock(&dev->struct_mutex); | 5190 | mutex_unlock(&dev->struct_mutex); |
5256 | } | 5191 | } |
5257 | 5192 | ||
@@ -5309,8 +5244,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc) | |||
5309 | struct drm_device *dev = crtc->dev; | 5244 | struct drm_device *dev = crtc->dev; |
5310 | struct drm_connector *connector; | 5245 | struct drm_connector *connector; |
5311 | struct drm_i915_private *dev_priv = dev->dev_private; | 5246 | struct drm_i915_private *dev_priv = dev->dev_private; |
5312 | struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb); | ||
5313 | enum pipe pipe = to_intel_crtc(crtc)->pipe; | ||
5314 | 5247 | ||
5315 | /* crtc should still be enabled when we disable it. */ | 5248 | /* crtc should still be enabled when we disable it. */ |
5316 | WARN_ON(!crtc->enabled); | 5249 | WARN_ON(!crtc->enabled); |
@@ -5318,14 +5251,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc) | |||
5318 | dev_priv->display.crtc_disable(crtc); | 5251 | dev_priv->display.crtc_disable(crtc); |
5319 | dev_priv->display.off(crtc); | 5252 | dev_priv->display.off(crtc); |
5320 | 5253 | ||
5321 | if (crtc->primary->fb) { | 5254 | crtc->primary->funcs->disable_plane(crtc->primary); |
5322 | mutex_lock(&dev->struct_mutex); | ||
5323 | intel_unpin_fb_obj(old_obj); | ||
5324 | i915_gem_track_fb(old_obj, NULL, | ||
5325 | INTEL_FRONTBUFFER_PRIMARY(pipe)); | ||
5326 | mutex_unlock(&dev->struct_mutex); | ||
5327 | crtc->primary->fb = NULL; | ||
5328 | } | ||
5329 | 5255 | ||
5330 | /* Update computed state. */ | 5256 | /* Update computed state. */ |
5331 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 5257 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
@@ -5382,25 +5308,25 @@ static void intel_connector_check_state(struct intel_connector *connector) | |||
5382 | if (connector->mst_port) | 5308 | if (connector->mst_port) |
5383 | return; | 5309 | return; |
5384 | 5310 | ||
5385 | WARN(connector->base.dpms == DRM_MODE_DPMS_OFF, | 5311 | I915_STATE_WARN(connector->base.dpms == DRM_MODE_DPMS_OFF, |
5386 | "wrong connector dpms state\n"); | 5312 | "wrong connector dpms state\n"); |
5387 | WARN(connector->base.encoder != &encoder->base, | 5313 | I915_STATE_WARN(connector->base.encoder != &encoder->base, |
5388 | "active connector not linked to encoder\n"); | 5314 | "active connector not linked to encoder\n"); |
5389 | 5315 | ||
5390 | if (encoder) { | 5316 | if (encoder) { |
5391 | WARN(!encoder->connectors_active, | 5317 | I915_STATE_WARN(!encoder->connectors_active, |
5392 | "encoder->connectors_active not set\n"); | 5318 | "encoder->connectors_active not set\n"); |
5393 | 5319 | ||
5394 | encoder_enabled = encoder->get_hw_state(encoder, &pipe); | 5320 | encoder_enabled = encoder->get_hw_state(encoder, &pipe); |
5395 | WARN(!encoder_enabled, "encoder not enabled\n"); | 5321 | I915_STATE_WARN(!encoder_enabled, "encoder not enabled\n"); |
5396 | if (WARN_ON(!encoder->base.crtc)) | 5322 | if (I915_STATE_WARN_ON(!encoder->base.crtc)) |
5397 | return; | 5323 | return; |
5398 | 5324 | ||
5399 | crtc = encoder->base.crtc; | 5325 | crtc = encoder->base.crtc; |
5400 | 5326 | ||
5401 | WARN(!crtc->enabled, "crtc not enabled\n"); | 5327 | I915_STATE_WARN(!crtc->enabled, "crtc not enabled\n"); |
5402 | WARN(!to_intel_crtc(crtc)->active, "crtc not active\n"); | 5328 | I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n"); |
5403 | WARN(pipe != to_intel_crtc(crtc)->pipe, | 5329 | I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe, |
5404 | "encoder active on the wrong pipe\n"); | 5330 | "encoder active on the wrong pipe\n"); |
5405 | } | 5331 | } |
5406 | } | 5332 | } |
@@ -7810,24 +7736,24 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) | |||
7810 | struct intel_crtc *crtc; | 7736 | struct intel_crtc *crtc; |
7811 | 7737 | ||
7812 | for_each_intel_crtc(dev, crtc) | 7738 | for_each_intel_crtc(dev, crtc) |
7813 | WARN(crtc->active, "CRTC for pipe %c enabled\n", | 7739 | I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", |
7814 | pipe_name(crtc->pipe)); | 7740 | pipe_name(crtc->pipe)); |
7815 | 7741 | ||
7816 | WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); | 7742 | I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); |
7817 | WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); | 7743 | I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); |
7818 | WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); | 7744 | I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); |
7819 | WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); | 7745 | I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); |
7820 | WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); | 7746 | I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); |
7821 | WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, | 7747 | I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, |
7822 | "CPU PWM1 enabled\n"); | 7748 | "CPU PWM1 enabled\n"); |
7823 | if (IS_HASWELL(dev)) | 7749 | if (IS_HASWELL(dev)) |
7824 | WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, | 7750 | I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, |
7825 | "CPU PWM2 enabled\n"); | 7751 | "CPU PWM2 enabled\n"); |
7826 | WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, | 7752 | I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, |
7827 | "PCH PWM1 enabled\n"); | 7753 | "PCH PWM1 enabled\n"); |
7828 | WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, | 7754 | I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, |
7829 | "Utility pin enabled\n"); | 7755 | "Utility pin enabled\n"); |
7830 | WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n"); | 7756 | I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n"); |
7831 | 7757 | ||
7832 | /* | 7758 | /* |
7833 | * In theory we can still leave IRQs enabled, as long as only the HPD | 7759 | * In theory we can still leave IRQs enabled, as long as only the HPD |
@@ -7835,7 +7761,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) | |||
7835 | * gen-specific and since we only disable LCPLL after we fully disable | 7761 | * gen-specific and since we only disable LCPLL after we fully disable |
7836 | * the interrupts, the check below should be enough. | 7762 | * the interrupts, the check below should be enough. |
7837 | */ | 7763 | */ |
7838 | WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); | 7764 | I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); |
7839 | } | 7765 | } |
7840 | 7766 | ||
7841 | static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) | 7767 | static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) |
@@ -8055,12 +7981,21 @@ static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv, | |||
8055 | enum port port, | 7981 | enum port port, |
8056 | struct intel_crtc_config *pipe_config) | 7982 | struct intel_crtc_config *pipe_config) |
8057 | { | 7983 | { |
8058 | u32 temp; | 7984 | u32 temp, dpll_ctl1; |
8059 | 7985 | ||
8060 | temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); | 7986 | temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); |
8061 | pipe_config->ddi_pll_sel = temp >> (port * 3 + 1); | 7987 | pipe_config->ddi_pll_sel = temp >> (port * 3 + 1); |
8062 | 7988 | ||
8063 | switch (pipe_config->ddi_pll_sel) { | 7989 | switch (pipe_config->ddi_pll_sel) { |
7990 | case SKL_DPLL0: | ||
7991 | /* | ||
7992 | * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part | ||
7993 | * of the shared DPLL framework and thus needs to be read out | ||
7994 | * separately | ||
7995 | */ | ||
7996 | dpll_ctl1 = I915_READ(DPLL_CTRL1); | ||
7997 | pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f; | ||
7998 | break; | ||
8064 | case SKL_DPLL1: | 7999 | case SKL_DPLL1: |
8065 | pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1; | 8000 | pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1; |
8066 | break; | 8001 | break; |
@@ -8286,7 +8221,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) | |||
8286 | cntl |= CURSOR_MODE_256_ARGB_AX; | 8221 | cntl |= CURSOR_MODE_256_ARGB_AX; |
8287 | break; | 8222 | break; |
8288 | default: | 8223 | default: |
8289 | WARN_ON(1); | 8224 | MISSING_CASE(intel_crtc->cursor_width); |
8290 | return; | 8225 | return; |
8291 | } | 8226 | } |
8292 | cntl |= pipe << 28; /* Connect to correct pipe */ | 8227 | cntl |= pipe << 28; /* Connect to correct pipe */ |
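
MISSING_CASE() replaces the bare WARN_ON(1) so the warning also reports which value fell through the switch. A plausible definition, assuming it wraps WARN() (the real macro sits in i915_drv.h):

    #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
                                 (long)(x), __func__)

Here the logged value is the unsupported cursor_width, which is considerably more useful in a bug report than an anonymous backtrace.
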
@@ -8405,109 +8340,6 @@ static bool cursor_size_ok(struct drm_device *dev, | |||
8405 | return true; | 8340 | return true; |
8406 | } | 8341 | } |
8407 | 8342 | ||
8408 | static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc, | ||
8409 | struct drm_i915_gem_object *obj, | ||
8410 | uint32_t width, uint32_t height) | ||
8411 | { | ||
8412 | struct drm_device *dev = crtc->dev; | ||
8413 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
8414 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
8415 | enum pipe pipe = intel_crtc->pipe; | ||
8416 | unsigned old_width; | ||
8417 | uint32_t addr; | ||
8418 | int ret; | ||
8419 | |||
8420 | /* if we want to turn off the cursor ignore width and height */ | ||
8421 | if (!obj) { | ||
8422 | DRM_DEBUG_KMS("cursor off\n"); | ||
8423 | addr = 0; | ||
8424 | mutex_lock(&dev->struct_mutex); | ||
8425 | goto finish; | ||
8426 | } | ||
8427 | |||
8428 | /* we only need to pin inside GTT if cursor is non-phy */ | ||
8429 | mutex_lock(&dev->struct_mutex); | ||
8430 | if (!INTEL_INFO(dev)->cursor_needs_physical) { | ||
8431 | unsigned alignment; | ||
8432 | |||
8433 | /* | ||
8434 | * Global gtt pte registers are special registers which actually | ||
8435 | * forward writes to a chunk of system memory. Which means that | ||
8436 | * there is no risk that the register values disappear as soon | ||
8437 | * as we call intel_runtime_pm_put(), so it is correct to wrap | ||
8438 | * only the pin/unpin/fence and not more. | ||
8439 | */ | ||
8440 | intel_runtime_pm_get(dev_priv); | ||
8441 | |||
8442 | /* Note that the w/a also requires 2 PTE of padding following | ||
8443 | * the bo. We currently fill all unused PTE with the shadow | ||
8444 | * page and so we should always have valid PTE following the | ||
8445 | * cursor preventing the VT-d warning. | ||
8446 | */ | ||
8447 | alignment = 0; | ||
8448 | if (need_vtd_wa(dev)) | ||
8449 | alignment = 64*1024; | ||
8450 | |||
8451 | ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL); | ||
8452 | if (ret) { | ||
8453 | DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n"); | ||
8454 | intel_runtime_pm_put(dev_priv); | ||
8455 | goto fail_locked; | ||
8456 | } | ||
8457 | |||
8458 | ret = i915_gem_object_put_fence(obj); | ||
8459 | if (ret) { | ||
8460 | DRM_DEBUG_KMS("failed to release fence for cursor"); | ||
8461 | intel_runtime_pm_put(dev_priv); | ||
8462 | goto fail_unpin; | ||
8463 | } | ||
8464 | |||
8465 | addr = i915_gem_obj_ggtt_offset(obj); | ||
8466 | |||
8467 | intel_runtime_pm_put(dev_priv); | ||
8468 | } else { | ||
8469 | int align = IS_I830(dev) ? 16 * 1024 : 256; | ||
8470 | ret = i915_gem_object_attach_phys(obj, align); | ||
8471 | if (ret) { | ||
8472 | DRM_DEBUG_KMS("failed to attach phys object\n"); | ||
8473 | goto fail_locked; | ||
8474 | } | ||
8475 | addr = obj->phys_handle->busaddr; | ||
8476 | } | ||
8477 | |||
8478 | finish: | ||
8479 | if (intel_crtc->cursor_bo) { | ||
8480 | if (!INTEL_INFO(dev)->cursor_needs_physical) | ||
8481 | i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); | ||
8482 | } | ||
8483 | |||
8484 | i915_gem_track_fb(intel_crtc->cursor_bo, obj, | ||
8485 | INTEL_FRONTBUFFER_CURSOR(pipe)); | ||
8486 | mutex_unlock(&dev->struct_mutex); | ||
8487 | |||
8488 | old_width = intel_crtc->cursor_width; | ||
8489 | |||
8490 | intel_crtc->cursor_addr = addr; | ||
8491 | intel_crtc->cursor_bo = obj; | ||
8492 | intel_crtc->cursor_width = width; | ||
8493 | intel_crtc->cursor_height = height; | ||
8494 | |||
8495 | if (intel_crtc->active) { | ||
8496 | if (old_width != width) | ||
8497 | intel_update_watermarks(crtc); | ||
8498 | intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); | ||
8499 | |||
8500 | intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe)); | ||
8501 | } | ||
8502 | |||
8503 | return 0; | ||
8504 | fail_unpin: | ||
8505 | i915_gem_object_unpin_from_display_plane(obj); | ||
8506 | fail_locked: | ||
8507 | mutex_unlock(&dev->struct_mutex); | ||
8508 | return ret; | ||
8509 | } | ||
8510 | |||
8511 | static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | 8343 | static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, |
8512 | u16 *blue, uint32_t start, uint32_t size) | 8344 | u16 *blue, uint32_t start, uint32_t size) |
8513 | { | 8345 | { |
@@ -9115,7 +8947,10 @@ static void intel_unpin_work_fn(struct work_struct *__work) | |||
9115 | drm_gem_object_unreference(&work->pending_flip_obj->base); | 8947 | drm_gem_object_unreference(&work->pending_flip_obj->base); |
9116 | drm_gem_object_unreference(&work->old_fb_obj->base); | 8948 | drm_gem_object_unreference(&work->old_fb_obj->base); |
9117 | 8949 | ||
9118 | intel_update_fbc(dev); | 8950 | intel_fbc_update(dev); |
8951 | |||
8952 | if (work->flip_queued_req) | ||
8953 | i915_gem_request_assign(&work->flip_queued_req, NULL); | ||
9119 | mutex_unlock(&dev->struct_mutex); | 8954 | mutex_unlock(&dev->struct_mutex); |
9120 | 8955 | ||
9121 | intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); | 8956 | intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); |
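
The unpin worker now releases its flip request through i915_gem_request_assign() instead of tracking a raw seqno. The helper is reference-counted pointer assignment; a sketch, assuming the request reference/unreference primitives introduced by this series:

    static inline void
    i915_gem_request_assign(struct drm_i915_gem_request **pdst,
                            struct drm_i915_gem_request *src)
    {
            if (src)
                    i915_gem_request_reference(src);     /* take the new ref first */
            if (*pdst)
                    i915_gem_request_unreference(*pdst); /* then drop the old one */
            *pdst = src;
    }

Assigning NULL, as above, therefore just means "drop the reference and clear the pointer".
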
@@ -9511,25 +9346,53 @@ static bool use_mmio_flip(struct intel_engine_cs *ring, | |||
9511 | else if (i915.enable_execlists) | 9346 | else if (i915.enable_execlists) |
9512 | return true; | 9347 | return true; |
9513 | else | 9348 | else |
9514 | return ring != obj->ring; | 9349 | return ring != i915_gem_request_get_ring(obj->last_read_req); |
9515 | } | 9350 | } |
9516 | 9351 | ||
9517 | static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) | 9352 | static void skl_do_mmio_flip(struct intel_crtc *intel_crtc) |
9353 | { | ||
9354 | struct drm_device *dev = intel_crtc->base.dev; | ||
9355 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
9356 | struct drm_framebuffer *fb = intel_crtc->base.primary->fb; | ||
9357 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | ||
9358 | struct drm_i915_gem_object *obj = intel_fb->obj; | ||
9359 | const enum pipe pipe = intel_crtc->pipe; | ||
9360 | u32 ctl, stride; | ||
9361 | |||
9362 | ctl = I915_READ(PLANE_CTL(pipe, 0)); | ||
9363 | ctl &= ~PLANE_CTL_TILED_MASK; | ||
9364 | if (obj->tiling_mode == I915_TILING_X) | ||
9365 | ctl |= PLANE_CTL_TILED_X; | ||
9366 | |||
9367 | /* | ||
9369 	| 	* The stride is expressed either in 64-byte chunks for linear buffers | ||
9370 	| 	* or in number of tiles for tiled buffers. | ||
9370 | */ | ||
9371 | stride = fb->pitches[0] >> 6; | ||
9372 | if (obj->tiling_mode == I915_TILING_X) | ||
9373 | stride = fb->pitches[0] >> 9; /* X tiles are 512 bytes wide */ | ||
9374 | |||
9375 | /* | ||
9376 	| 	* Neither PLANE_CTL nor PLANE_STRIDE is latched on vblank; both take | ||
9377 	| 	* effect on the PLANE_SURF write, so the update is guaranteed atomic. | ||
9378 | */ | ||
9379 | I915_WRITE(PLANE_CTL(pipe, 0), ctl); | ||
9380 | I915_WRITE(PLANE_STRIDE(pipe, 0), stride); | ||
9381 | |||
9382 | I915_WRITE(PLANE_SURF(pipe, 0), intel_crtc->unpin_work->gtt_offset); | ||
9383 | POSTING_READ(PLANE_SURF(pipe, 0)); | ||
9384 | } | ||
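
The pitch-to-stride conversion divides the byte pitch by the unit PLANE_STRIDE expects: 64-byte chunks for linear scanout, whole tiles for X-tiled (512 bytes wide on SKL). A worked example, assuming a 1920x1080 XRGB8888 framebuffer:

    u32 pitch = 1920 * 4;               /* 7680-byte pitch */

    u32 stride_linear = pitch >> 6;     /* 7680 / 64  = 120 chunks */
    u32 stride_xtiled = pitch >> 9;     /* 7680 / 512 =  15 tiles  */
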
9385 | |||
9386 | static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc) | ||
9518 | { | 9387 | { |
9519 | struct drm_device *dev = intel_crtc->base.dev; | 9388 | struct drm_device *dev = intel_crtc->base.dev; |
9520 | struct drm_i915_private *dev_priv = dev->dev_private; | 9389 | struct drm_i915_private *dev_priv = dev->dev_private; |
9521 | struct intel_framebuffer *intel_fb = | 9390 | struct intel_framebuffer *intel_fb = |
9522 | to_intel_framebuffer(intel_crtc->base.primary->fb); | 9391 | to_intel_framebuffer(intel_crtc->base.primary->fb); |
9523 | struct drm_i915_gem_object *obj = intel_fb->obj; | 9392 | struct drm_i915_gem_object *obj = intel_fb->obj; |
9524 | bool atomic_update; | ||
9525 | u32 start_vbl_count; | ||
9526 | u32 dspcntr; | 9393 | u32 dspcntr; |
9527 | u32 reg; | 9394 | u32 reg; |
9528 | 9395 | ||
9529 | intel_mark_page_flip_active(intel_crtc); | ||
9530 | |||
9531 | atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count); | ||
9532 | |||
9533 | reg = DSPCNTR(intel_crtc->plane); | 9396 | reg = DSPCNTR(intel_crtc->plane); |
9534 | dspcntr = I915_READ(reg); | 9397 | dspcntr = I915_READ(reg); |
9535 | 9398 | ||
@@ -9544,26 +9407,50 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) | |||
9544 | intel_crtc->unpin_work->gtt_offset); | 9407 | intel_crtc->unpin_work->gtt_offset); |
9545 | POSTING_READ(DSPSURF(intel_crtc->plane)); | 9408 | POSTING_READ(DSPSURF(intel_crtc->plane)); |
9546 | 9409 | ||
9410 | } | ||
9411 | |||
9412 | /* | ||
9413 | * XXX: This is the temporary way to update the plane registers until we get | ||
9414 | * around to using the usual plane update functions for MMIO flips | ||
9415 | */ | ||
9416 | static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) | ||
9417 | { | ||
9418 | struct drm_device *dev = intel_crtc->base.dev; | ||
9419 | bool atomic_update; | ||
9420 | u32 start_vbl_count; | ||
9421 | |||
9422 | intel_mark_page_flip_active(intel_crtc); | ||
9423 | |||
9424 | atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count); | ||
9425 | |||
9426 | if (INTEL_INFO(dev)->gen >= 9) | ||
9427 | skl_do_mmio_flip(intel_crtc); | ||
9428 | else | ||
9429 	| 		/* use_mmio_flip() restricts MMIO flips to ilk+ */ | ||
9430 | ilk_do_mmio_flip(intel_crtc); | ||
9431 | |||
9547 | if (atomic_update) | 9432 | if (atomic_update) |
9548 | intel_pipe_update_end(intel_crtc, start_vbl_count); | 9433 | intel_pipe_update_end(intel_crtc, start_vbl_count); |
9549 | } | 9434 | } |
9550 | 9435 | ||
9551 | static void intel_mmio_flip_work_func(struct work_struct *work) | 9436 | static void intel_mmio_flip_work_func(struct work_struct *work) |
9552 | { | 9437 | { |
9553 | struct intel_crtc *intel_crtc = | 9438 | struct intel_crtc *crtc = |
9554 | container_of(work, struct intel_crtc, mmio_flip.work); | 9439 | container_of(work, struct intel_crtc, mmio_flip.work); |
9555 | struct intel_engine_cs *ring; | 9440 | struct intel_mmio_flip *mmio_flip; |
9556 | uint32_t seqno; | ||
9557 | |||
9558 | seqno = intel_crtc->mmio_flip.seqno; | ||
9559 | ring = intel_crtc->mmio_flip.ring; | ||
9560 | 9441 | ||
9561 | if (seqno) | 9442 | mmio_flip = &crtc->mmio_flip; |
9562 | WARN_ON(__i915_wait_seqno(ring, seqno, | 9443 | if (mmio_flip->req) |
9563 | intel_crtc->reset_counter, | 9444 | WARN_ON(__i915_wait_request(mmio_flip->req, |
9564 | false, NULL, NULL) != 0); | 9445 | crtc->reset_counter, |
9446 | false, NULL, NULL) != 0); | ||
9565 | 9447 | ||
9566 | intel_do_mmio_flip(intel_crtc); | 9448 | intel_do_mmio_flip(crtc); |
9449 | if (mmio_flip->req) { | ||
9450 | mutex_lock(&crtc->base.dev->struct_mutex); | ||
9451 | i915_gem_request_assign(&mmio_flip->req, NULL); | ||
9452 | mutex_unlock(&crtc->base.dev->struct_mutex); | ||
9453 | } | ||
9567 | } | 9454 | } |
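
The worker blocks on the tracked request, if any, before writing the registers. For reference, the __i915_wait_request() contract as used here (signature per this point in the series):

    /* Returns 0 once the request completes, -EAGAIN if the passed-in
     * reset counter went stale (i.e. a GPU reset occurred), or another
     * negative errno. */
    int __i915_wait_request(struct drm_i915_gem_request *req,
                            unsigned reset_counter,
                            bool interruptible,
                            s64 *timeout,
                            struct drm_i915_file_private *file_priv);

Passing the CRTC's reset_counter means a hung GPU unblocks the flip worker instead of wedging it forever.
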
9568 | 9455 | ||
9569 | static int intel_queue_mmio_flip(struct drm_device *dev, | 9456 | static int intel_queue_mmio_flip(struct drm_device *dev, |
@@ -9575,8 +9462,8 @@ static int intel_queue_mmio_flip(struct drm_device *dev, | |||
9575 | { | 9462 | { |
9576 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 9463 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
9577 | 9464 | ||
9578 | intel_crtc->mmio_flip.seqno = obj->last_write_seqno; | 9465 | i915_gem_request_assign(&intel_crtc->mmio_flip.req, |
9579 | intel_crtc->mmio_flip.ring = obj->ring; | 9466 | obj->last_write_req); |
9580 | 9467 | ||
9581 | schedule_work(&intel_crtc->mmio_flip.work); | 9468 | schedule_work(&intel_crtc->mmio_flip.work); |
9582 | 9469 | ||
@@ -9671,9 +9558,8 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev, | |||
9671 | return false; | 9558 | return false; |
9672 | 9559 | ||
9673 | if (work->flip_ready_vblank == 0) { | 9560 | if (work->flip_ready_vblank == 0) { |
9674 | if (work->flip_queued_ring && | 9561 | if (work->flip_queued_req && |
9675 | !i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true), | 9562 | !i915_gem_request_completed(work->flip_queued_req, true)) |
9676 | work->flip_queued_seqno)) | ||
9677 | return false; | 9563 | return false; |
9678 | 9564 | ||
9679 | work->flip_ready_vblank = drm_vblank_count(dev, intel_crtc->pipe); | 9565 | work->flip_ready_vblank = drm_vblank_count(dev, intel_crtc->pipe); |
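
The stall check now asks the request itself whether it finished, rather than comparing ring seqnos by hand. A sketch of the helper, assuming it still reduces to a seqno comparison internally:

    static inline bool
    i915_gem_request_completed(struct drm_i915_gem_request *req,
                               bool lazy_coherency)
    {
            u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);

            return i915_seqno_passed(seqno, req->seqno);
    }
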
@@ -9726,6 +9612,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
9726 | struct drm_framebuffer *old_fb = crtc->primary->fb; | 9612 | struct drm_framebuffer *old_fb = crtc->primary->fb; |
9727 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | 9613 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); |
9728 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 9614 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
9615 | struct drm_plane *primary = crtc->primary; | ||
9616 | struct intel_plane *intel_plane = to_intel_plane(primary); | ||
9729 | enum pipe pipe = intel_crtc->pipe; | 9617 | enum pipe pipe = intel_crtc->pipe; |
9730 | struct intel_unpin_work *work; | 9618 | struct intel_unpin_work *work; |
9731 | struct intel_engine_cs *ring; | 9619 | struct intel_engine_cs *ring; |
@@ -9818,7 +9706,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
9818 | } else if (IS_IVYBRIDGE(dev)) { | 9706 | } else if (IS_IVYBRIDGE(dev)) { |
9819 | ring = &dev_priv->ring[BCS]; | 9707 | ring = &dev_priv->ring[BCS]; |
9820 | } else if (INTEL_INFO(dev)->gen >= 7) { | 9708 | } else if (INTEL_INFO(dev)->gen >= 7) { |
9821 | ring = obj->ring; | 9709 | ring = i915_gem_request_get_ring(obj->last_read_req); |
9822 | if (ring == NULL || ring->id != RCS) | 9710 | if (ring == NULL || ring->id != RCS) |
9823 | ring = &dev_priv->ring[BCS]; | 9711 | ring = &dev_priv->ring[BCS]; |
9824 | } else { | 9712 | } else { |
@@ -9838,16 +9726,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
9838 | if (ret) | 9726 | if (ret) |
9839 | goto cleanup_unpin; | 9727 | goto cleanup_unpin; |
9840 | 9728 | ||
9841 | work->flip_queued_seqno = obj->last_write_seqno; | 9729 | i915_gem_request_assign(&work->flip_queued_req, |
9842 | work->flip_queued_ring = obj->ring; | 9730 | obj->last_write_req); |
9843 | } else { | 9731 | } else { |
9844 | ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, | 9732 | ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, |
9845 | page_flip_flags); | 9733 | page_flip_flags); |
9846 | if (ret) | 9734 | if (ret) |
9847 | goto cleanup_unpin; | 9735 | goto cleanup_unpin; |
9848 | 9736 | ||
9849 | work->flip_queued_seqno = intel_ring_get_seqno(ring); | 9737 | i915_gem_request_assign(&work->flip_queued_req, |
9850 | work->flip_queued_ring = ring; | 9738 | intel_ring_get_request(ring)); |
9851 | } | 9739 | } |
9852 | 9740 | ||
9853 | work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe); | 9741 | work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe); |
@@ -9856,7 +9744,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
9856 | i915_gem_track_fb(work->old_fb_obj, obj, | 9744 | i915_gem_track_fb(work->old_fb_obj, obj, |
9857 | INTEL_FRONTBUFFER_PRIMARY(pipe)); | 9745 | INTEL_FRONTBUFFER_PRIMARY(pipe)); |
9858 | 9746 | ||
9859 | intel_disable_fbc(dev); | 9747 | intel_fbc_disable(dev); |
9860 | intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); | 9748 | intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); |
9861 | mutex_unlock(&dev->struct_mutex); | 9749 | mutex_unlock(&dev->struct_mutex); |
9862 | 9750 | ||
@@ -9884,8 +9772,15 @@ free_work: | |||
9884 | 9772 | ||
9885 | if (ret == -EIO) { | 9773 | if (ret == -EIO) { |
9886 | out_hang: | 9774 | out_hang: |
9887 | intel_crtc_wait_for_pending_flips(crtc); | 9775 | ret = primary->funcs->update_plane(primary, crtc, fb, |
9888 | ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb); | 9776 | intel_plane->crtc_x, |
9777 | intel_plane->crtc_y, | ||
9778 	| 						    intel_plane->crtc_w, | ||
9779 	| 						    intel_plane->crtc_h, | ||
9780 	| 						    intel_plane->src_x, | ||
9781 	| 						    intel_plane->src_y, | ||
9782 	| 						    intel_plane->src_w, | ||
9783 	| 						    intel_plane->src_h); | ||
9889 | if (ret == 0 && event) { | 9784 | if (ret == 0 && event) { |
9890 | spin_lock_irq(&dev->event_lock); | 9785 | spin_lock_irq(&dev->event_lock); |
9891 | drm_send_vblank_event(dev, pipe, event); | 9786 | drm_send_vblank_event(dev, pipe, event); |
@@ -10254,9 +10149,9 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, | |||
10254 | * computation to clearly distinguish it from the adjusted mode, which | 10149 | * computation to clearly distinguish it from the adjusted mode, which |
10255 | * can be changed by the connectors in the below retry loop. | 10150 | * can be changed by the connectors in the below retry loop. |
10256 | */ | 10151 | */ |
10257 | drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE); | 10152 | drm_crtc_get_hv_timing(&pipe_config->requested_mode, |
10258 | pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay; | 10153 | &pipe_config->pipe_src_w, |
10259 | pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay; | 10154 | &pipe_config->pipe_src_h); |
10260 | 10155 | ||
10261 | encoder_retry: | 10156 | encoder_retry: |
10262 | /* Ensure the port clock defaults are reset when retrying. */ | 10157 | /* Ensure the port clock defaults are reset when retrying. */ |
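
drm_crtc_get_hv_timing() folds the stereo handling into the readout: for frame-packed modes the returned vdisplay covers both eyes plus the intervening blanking. A worked example for 1920x1080 frame packing (vtotal 1125):

    int hdisplay, vdisplay;

    /* mode: 1920x1080, DRM_MODE_FLAG_3D_FRAME_PACKING, vtotal = 1125 */
    drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
    /* hdisplay == 1920, vdisplay == 1080 + 1125 == 2205 */

so pipe_src_h picks up the doubled size without callers open-coding the CRTC_STEREO_DOUBLE dance.
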
@@ -10742,7 +10637,7 @@ check_connector_state(struct drm_device *dev) | |||
10742 | * ->get_hw_state callbacks. */ | 10637 | * ->get_hw_state callbacks. */ |
10743 | intel_connector_check_state(connector); | 10638 | intel_connector_check_state(connector); |
10744 | 10639 | ||
10745 | WARN(&connector->new_encoder->base != connector->base.encoder, | 10640 | I915_STATE_WARN(&connector->new_encoder->base != connector->base.encoder, |
10746 | "connector's staged encoder doesn't match current encoder\n"); | 10641 | "connector's staged encoder doesn't match current encoder\n"); |
10747 | } | 10642 | } |
10748 | } | 10643 | } |
@@ -10762,9 +10657,9 @@ check_encoder_state(struct drm_device *dev) | |||
10762 | encoder->base.base.id, | 10657 | encoder->base.base.id, |
10763 | encoder->base.name); | 10658 | encoder->base.name); |
10764 | 10659 | ||
10765 | WARN(&encoder->new_crtc->base != encoder->base.crtc, | 10660 | I915_STATE_WARN(&encoder->new_crtc->base != encoder->base.crtc, |
10766 | "encoder's stage crtc doesn't match current crtc\n"); | 10661 | "encoder's stage crtc doesn't match current crtc\n"); |
10767 | WARN(encoder->connectors_active && !encoder->base.crtc, | 10662 | I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc, |
10768 | "encoder's active_connectors set, but no crtc\n"); | 10663 | "encoder's active_connectors set, but no crtc\n"); |
10769 | 10664 | ||
10770 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 10665 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
@@ -10783,19 +10678,19 @@ check_encoder_state(struct drm_device *dev) | |||
10783 | if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST) | 10678 | if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST) |
10784 | continue; | 10679 | continue; |
10785 | 10680 | ||
10786 | WARN(!!encoder->base.crtc != enabled, | 10681 | I915_STATE_WARN(!!encoder->base.crtc != enabled, |
10787 | "encoder's enabled state mismatch " | 10682 | "encoder's enabled state mismatch " |
10788 | "(expected %i, found %i)\n", | 10683 | "(expected %i, found %i)\n", |
10789 | !!encoder->base.crtc, enabled); | 10684 | !!encoder->base.crtc, enabled); |
10790 | WARN(active && !encoder->base.crtc, | 10685 | I915_STATE_WARN(active && !encoder->base.crtc, |
10791 | "active encoder with no crtc\n"); | 10686 | "active encoder with no crtc\n"); |
10792 | 10687 | ||
10793 | WARN(encoder->connectors_active != active, | 10688 | I915_STATE_WARN(encoder->connectors_active != active, |
10794 | "encoder's computed active state doesn't match tracked active state " | 10689 | "encoder's computed active state doesn't match tracked active state " |
10795 | "(expected %i, found %i)\n", active, encoder->connectors_active); | 10690 | "(expected %i, found %i)\n", active, encoder->connectors_active); |
10796 | 10691 | ||
10797 | active = encoder->get_hw_state(encoder, &pipe); | 10692 | active = encoder->get_hw_state(encoder, &pipe); |
10798 | WARN(active != encoder->connectors_active, | 10693 | I915_STATE_WARN(active != encoder->connectors_active, |
10799 | "encoder's hw state doesn't match sw tracking " | 10694 | "encoder's hw state doesn't match sw tracking " |
10800 | "(expected %i, found %i)\n", | 10695 | "(expected %i, found %i)\n", |
10801 | encoder->connectors_active, active); | 10696 | encoder->connectors_active, active); |
@@ -10804,7 +10699,7 @@ check_encoder_state(struct drm_device *dev) | |||
10804 | continue; | 10699 | continue; |
10805 | 10700 | ||
10806 | tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe; | 10701 | tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe; |
10807 | WARN(active && pipe != tracked_pipe, | 10702 | I915_STATE_WARN(active && pipe != tracked_pipe, |
10808 | "active encoder's pipe doesn't match" | 10703 | "active encoder's pipe doesn't match" |
10809 | "(expected %i, found %i)\n", | 10704 | "(expected %i, found %i)\n", |
10810 | tracked_pipe, pipe); | 10705 | tracked_pipe, pipe); |
@@ -10829,7 +10724,7 @@ check_crtc_state(struct drm_device *dev) | |||
10829 | DRM_DEBUG_KMS("[CRTC:%d]\n", | 10724 | DRM_DEBUG_KMS("[CRTC:%d]\n", |
10830 | crtc->base.base.id); | 10725 | crtc->base.base.id); |
10831 | 10726 | ||
10832 | WARN(crtc->active && !crtc->base.enabled, | 10727 | I915_STATE_WARN(crtc->active && !crtc->base.enabled, |
10833 | "active crtc, but not enabled in sw tracking\n"); | 10728 | "active crtc, but not enabled in sw tracking\n"); |
10834 | 10729 | ||
10835 | for_each_intel_encoder(dev, encoder) { | 10730 | for_each_intel_encoder(dev, encoder) { |
@@ -10840,10 +10735,10 @@ check_crtc_state(struct drm_device *dev) | |||
10840 | active = true; | 10735 | active = true; |
10841 | } | 10736 | } |
10842 | 10737 | ||
10843 | WARN(active != crtc->active, | 10738 | I915_STATE_WARN(active != crtc->active, |
10844 | "crtc's computed active state doesn't match tracked active state " | 10739 | "crtc's computed active state doesn't match tracked active state " |
10845 | "(expected %i, found %i)\n", active, crtc->active); | 10740 | "(expected %i, found %i)\n", active, crtc->active); |
10846 | WARN(enabled != crtc->base.enabled, | 10741 | I915_STATE_WARN(enabled != crtc->base.enabled, |
10847 | "crtc's computed enabled state doesn't match tracked enabled state " | 10742 | "crtc's computed enabled state doesn't match tracked enabled state " |
10848 | "(expected %i, found %i)\n", enabled, crtc->base.enabled); | 10743 | "(expected %i, found %i)\n", enabled, crtc->base.enabled); |
10849 | 10744 | ||
@@ -10863,13 +10758,13 @@ check_crtc_state(struct drm_device *dev) | |||
10863 | encoder->get_config(encoder, &pipe_config); | 10758 | encoder->get_config(encoder, &pipe_config); |
10864 | } | 10759 | } |
10865 | 10760 | ||
10866 | WARN(crtc->active != active, | 10761 | I915_STATE_WARN(crtc->active != active, |
10867 | "crtc active state doesn't match with hw state " | 10762 | "crtc active state doesn't match with hw state " |
10868 | "(expected %i, found %i)\n", crtc->active, active); | 10763 | "(expected %i, found %i)\n", crtc->active, active); |
10869 | 10764 | ||
10870 | if (active && | 10765 | if (active && |
10871 | !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) { | 10766 | !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) { |
10872 | WARN(1, "pipe state doesn't match!\n"); | 10767 | I915_STATE_WARN(1, "pipe state doesn't match!\n"); |
10873 | intel_dump_pipe_config(crtc, &pipe_config, | 10768 | intel_dump_pipe_config(crtc, &pipe_config, |
10874 | "[hw state]"); | 10769 | "[hw state]"); |
10875 | intel_dump_pipe_config(crtc, &crtc->config, | 10770 | intel_dump_pipe_config(crtc, &crtc->config, |
@@ -10897,14 +10792,14 @@ check_shared_dpll_state(struct drm_device *dev) | |||
10897 | 10792 | ||
10898 | active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state); | 10793 | active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state); |
10899 | 10794 | ||
10900 | WARN(pll->active > hweight32(pll->config.crtc_mask), | 10795 | I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask), |
10901 | "more active pll users than references: %i vs %i\n", | 10796 | "more active pll users than references: %i vs %i\n", |
10902 | pll->active, hweight32(pll->config.crtc_mask)); | 10797 | pll->active, hweight32(pll->config.crtc_mask)); |
10903 | WARN(pll->active && !pll->on, | 10798 | I915_STATE_WARN(pll->active && !pll->on, |
10904 | "pll in active use but not on in sw tracking\n"); | 10799 | "pll in active use but not on in sw tracking\n"); |
10905 | WARN(pll->on && !pll->active, | 10800 | I915_STATE_WARN(pll->on && !pll->active, |
10906 | "pll in on but not on in use in sw tracking\n"); | 10801 | "pll in on but not on in use in sw tracking\n"); |
10907 | WARN(pll->on != active, | 10802 | I915_STATE_WARN(pll->on != active, |
10908 | "pll on state mismatch (expected %i, found %i)\n", | 10803 | "pll on state mismatch (expected %i, found %i)\n", |
10909 | pll->on, active); | 10804 | pll->on, active); |
10910 | 10805 | ||
@@ -10914,14 +10809,14 @@ check_shared_dpll_state(struct drm_device *dev) | |||
10914 | if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) | 10809 | if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) |
10915 | active_crtcs++; | 10810 | active_crtcs++; |
10916 | } | 10811 | } |
10917 | WARN(pll->active != active_crtcs, | 10812 | I915_STATE_WARN(pll->active != active_crtcs, |
10918 | "pll active crtcs mismatch (expected %i, found %i)\n", | 10813 | "pll active crtcs mismatch (expected %i, found %i)\n", |
10919 | pll->active, active_crtcs); | 10814 | pll->active, active_crtcs); |
10920 | WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs, | 10815 | I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs, |
10921 | "pll enabled crtcs mismatch (expected %i, found %i)\n", | 10816 | "pll enabled crtcs mismatch (expected %i, found %i)\n", |
10922 | hweight32(pll->config.crtc_mask), enabled_crtcs); | 10817 | hweight32(pll->config.crtc_mask), enabled_crtcs); |
10923 | 10818 | ||
10924 | WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state, | 10819 | I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state, |
10925 | sizeof(dpll_hw_state)), | 10820 | sizeof(dpll_hw_state)), |
10926 | "pll hw state mismatch\n"); | 10821 | "pll hw state mismatch\n"); |
10927 | } | 10822 | } |
@@ -11114,26 +11009,15 @@ static int __intel_set_mode(struct drm_crtc *crtc, | |||
11114 | * on the DPLL. | 11009 | * on the DPLL. |
11115 | */ | 11010 | */ |
11116 | for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { | 11011 | for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { |
11117 | struct drm_framebuffer *old_fb = crtc->primary->fb; | 11012 | struct drm_plane *primary = intel_crtc->base.primary; |
11118 | struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb); | 11013 | int vdisplay, hdisplay; |
11119 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | ||
11120 | 11014 | ||
11121 | mutex_lock(&dev->struct_mutex); | 11015 | drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay); |
11122 | ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL); | 11016 | ret = primary->funcs->update_plane(primary, &intel_crtc->base, |
11123 | if (ret != 0) { | 11017 | fb, 0, 0, |
11124 | DRM_ERROR("pin & fence failed\n"); | 11018 | hdisplay, vdisplay, |
11125 | mutex_unlock(&dev->struct_mutex); | 11019 | x << 16, y << 16, |
11126 | goto done; | 11020 | hdisplay << 16, vdisplay << 16); |
11127 | } | ||
11128 | if (old_fb) | ||
11129 | intel_unpin_fb_obj(old_obj); | ||
11130 | i915_gem_track_fb(old_obj, obj, | ||
11131 | INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); | ||
11132 | mutex_unlock(&dev->struct_mutex); | ||
11133 | |||
11134 | crtc->primary->fb = fb; | ||
11135 | crtc->x = x; | ||
11136 | crtc->y = y; | ||
11137 | } | 11021 | } |
11138 | 11022 | ||
11139 | /* Now enable the clocks, plane, pipe, and connectors that we set up. */ | 11023 | /* Now enable the clocks, plane, pipe, and connectors that we set up. */ |
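
The update_plane() vfunc takes destination coordinates in whole pixels but source coordinates in 16.16 fixed point, which is what the "<< 16" shifts above encode. For example:

    uint32_t src_x = 100 << 16;                 /* pixel 100, i.e. 0x00640000 */
    uint32_t src_x_frac = (100 << 16) + 0x8000; /* pixel 100.5 */

The fixed-point source rectangle is what allows sprite planes to scale; the primary plane path below checks it against NO_SCALING and rejects any mismatch.
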
@@ -11601,11 +11485,14 @@ static int intel_crtc_set_config(struct drm_mode_set *set) | |||
11601 | disable_pipes); | 11485 | disable_pipes); |
11602 | } else if (config->fb_changed) { | 11486 | } else if (config->fb_changed) { |
11603 | struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc); | 11487 | struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc); |
11488 | struct drm_plane *primary = set->crtc->primary; | ||
11489 | int vdisplay, hdisplay; | ||
11604 | 11490 | ||
11605 | intel_crtc_wait_for_pending_flips(set->crtc); | 11491 | drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay); |
11606 | 11492 | ret = primary->funcs->update_plane(primary, set->crtc, set->fb, | |
11607 | ret = intel_pipe_set_base(set->crtc, | 11493 | 0, 0, hdisplay, vdisplay, |
11608 | set->x, set->y, set->fb); | 11494 | set->x << 16, set->y << 16, |
11495 | hdisplay << 16, vdisplay << 16); | ||
11609 | 11496 | ||
11610 | /* | 11497 | /* |
11611 | * We need to make sure the primary plane is re-enabled if it | 11498 | * We need to make sure the primary plane is re-enabled if it |
@@ -11762,95 +11649,115 @@ static void intel_shared_dpll_init(struct drm_device *dev) | |||
11762 | BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); | 11649 | BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); |
11763 | } | 11650 | } |
11764 | 11651 | ||
11765 | static int | 11652 | /** |
11766 | intel_primary_plane_disable(struct drm_plane *plane) | 11653 | * intel_prepare_plane_fb - Prepare fb for usage on plane |
11654 | * @plane: drm plane to prepare for | ||
11655 | * @fb: framebuffer to prepare for presentation | ||
11656 | * | ||
11657 | * Prepares a framebuffer for usage on a display plane. Generally this | ||
11658 | * involves pinning the underlying object and updating the frontbuffer tracking | ||
11659 | * bits. Some older platforms need special physical address handling for | ||
11660 | * cursor planes. | ||
11661 | * | ||
11662 | * Returns 0 on success, negative error code on failure. | ||
11663 | */ | ||
11664 | int | ||
11665 | intel_prepare_plane_fb(struct drm_plane *plane, | ||
11666 | struct drm_framebuffer *fb) | ||
11767 | { | 11667 | { |
11768 | struct drm_device *dev = plane->dev; | 11668 | struct drm_device *dev = plane->dev; |
11769 | struct intel_crtc *intel_crtc; | 11669 | struct intel_plane *intel_plane = to_intel_plane(plane); |
11670 | enum pipe pipe = intel_plane->pipe; | ||
11671 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | ||
11672 | struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); | ||
11673 | unsigned frontbuffer_bits = 0; | ||
11674 | int ret = 0; | ||
11770 | 11675 | ||
11771 | if (!plane->fb) | 11676 | if (WARN_ON(fb == plane->fb || !obj)) |
11772 | return 0; | 11677 | return 0; |
11773 | 11678 | ||
11774 | BUG_ON(!plane->crtc); | 11679 | switch (plane->type) { |
11680 | case DRM_PLANE_TYPE_PRIMARY: | ||
11681 | frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe); | ||
11682 | break; | ||
11683 | case DRM_PLANE_TYPE_CURSOR: | ||
11684 | frontbuffer_bits = INTEL_FRONTBUFFER_CURSOR(pipe); | ||
11685 | break; | ||
11686 | case DRM_PLANE_TYPE_OVERLAY: | ||
11687 | frontbuffer_bits = INTEL_FRONTBUFFER_SPRITE(pipe); | ||
11688 | break; | ||
11689 | } | ||
11775 | 11690 | ||
11776 | intel_crtc = to_intel_crtc(plane->crtc); | 11691 | mutex_lock(&dev->struct_mutex); |
11777 | 11692 | ||
11778 | /* | 11693 | if (plane->type == DRM_PLANE_TYPE_CURSOR && |
11779 | * Even though we checked plane->fb above, it's still possible that | 11694 | INTEL_INFO(dev)->cursor_needs_physical) { |
11780 | * the primary plane has been implicitly disabled because the crtc | 11695 | int align = IS_I830(dev) ? 16 * 1024 : 256; |
11781 | * coordinates given weren't visible, or because we detected | 11696 | ret = i915_gem_object_attach_phys(obj, align); |
11782 | * that it was 100% covered by a sprite plane. Or, the CRTC may be | 11697 | if (ret) |
11783 | * off and we've set a fb, but haven't actually turned on the CRTC yet. | 11698 | DRM_DEBUG_KMS("failed to attach phys object\n"); |
11784 | * In either case, we need to unpin the FB and let the fb pointer get | 11699 | } else { |
11785 | * updated, but otherwise we don't need to touch the hardware. | 11700 | ret = intel_pin_and_fence_fb_obj(plane, fb, NULL); |
11786 | */ | 11701 | } |
11787 | if (!intel_crtc->primary_enabled) | ||
11788 | goto disable_unpin; | ||
11789 | 11702 | ||
11790 | intel_crtc_wait_for_pending_flips(plane->crtc); | 11703 | if (ret == 0) |
11791 | intel_disable_primary_hw_plane(plane, plane->crtc); | 11704 | i915_gem_track_fb(old_obj, obj, frontbuffer_bits); |
11792 | 11705 | ||
11793 | disable_unpin: | ||
11794 | mutex_lock(&dev->struct_mutex); | ||
11795 | i915_gem_track_fb(intel_fb_obj(plane->fb), NULL, | ||
11796 | INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); | ||
11797 | intel_unpin_fb_obj(intel_fb_obj(plane->fb)); | ||
11798 | mutex_unlock(&dev->struct_mutex); | 11706 | mutex_unlock(&dev->struct_mutex); |
11799 | plane->fb = NULL; | ||
11800 | 11707 | ||
11801 | return 0; | 11708 | return ret; |
11709 | } | ||
11710 | |||
11711 | /** | ||
11712 | * intel_cleanup_plane_fb - Cleans up an fb after plane use | ||
11713 | * @plane: drm plane to clean up for | ||
11714 | * @fb: old framebuffer that was on plane | ||
11715 | * | ||
11716 | * Cleans up a framebuffer that has just been removed from a plane. | ||
11717 | */ | ||
11718 | void | ||
11719 | intel_cleanup_plane_fb(struct drm_plane *plane, | ||
11720 | struct drm_framebuffer *fb) | ||
11721 | { | ||
11722 | struct drm_device *dev = plane->dev; | ||
11723 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | ||
11724 | |||
11725 | if (WARN_ON(!obj)) | ||
11726 | return; | ||
11727 | |||
11728 | if (plane->type != DRM_PLANE_TYPE_CURSOR || | ||
11729 | !INTEL_INFO(dev)->cursor_needs_physical) { | ||
11730 | mutex_lock(&dev->struct_mutex); | ||
11731 | intel_unpin_fb_obj(obj); | ||
11732 | mutex_unlock(&dev->struct_mutex); | ||
11733 | } | ||
11802 | } | 11734 | } |
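
prepare/cleanup key the frontbuffer tracking on a per-pipe, per-plane-type bit. A sketch of the bit layout, assuming the macros of this era in i915_drv.h (one nibble per pipe):

    #define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
    #define INTEL_FRONTBUFFER_PRIMARY(pipe) \
            (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
    #define INTEL_FRONTBUFFER_CURSOR(pipe) \
            (1 << (1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
    #define INTEL_FRONTBUFFER_SPRITE(pipe) \
            (1 << (2 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))

The switch on plane->type above picks exactly one of these bits for i915_gem_track_fb().
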
11803 | 11735 | ||
11804 | static int | 11736 | static int |
11805 | intel_check_primary_plane(struct drm_plane *plane, | 11737 | intel_check_primary_plane(struct drm_plane *plane, |
11806 | struct intel_plane_state *state) | 11738 | struct intel_plane_state *state) |
11807 | { | 11739 | { |
11808 | struct drm_crtc *crtc = state->crtc; | 11740 | struct drm_crtc *crtc = state->base.crtc; |
11809 | struct drm_framebuffer *fb = state->fb; | 11741 | struct drm_framebuffer *fb = state->base.fb; |
11810 | struct drm_rect *dest = &state->dst; | 11742 | struct drm_rect *dest = &state->dst; |
11811 | struct drm_rect *src = &state->src; | 11743 | struct drm_rect *src = &state->src; |
11812 | const struct drm_rect *clip = &state->clip; | 11744 | const struct drm_rect *clip = &state->clip; |
11813 | |||
11814 | return drm_plane_helper_check_update(plane, crtc, fb, | ||
11815 | src, dest, clip, | ||
11816 | DRM_PLANE_HELPER_NO_SCALING, | ||
11817 | DRM_PLANE_HELPER_NO_SCALING, | ||
11818 | false, true, &state->visible); | ||
11819 | } | ||
11820 | |||
11821 | static int | ||
11822 | intel_prepare_primary_plane(struct drm_plane *plane, | ||
11823 | struct intel_plane_state *state) | ||
11824 | { | ||
11825 | struct drm_crtc *crtc = state->crtc; | ||
11826 | struct drm_framebuffer *fb = state->fb; | ||
11827 | struct drm_device *dev = crtc->dev; | ||
11828 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
11829 | enum pipe pipe = intel_crtc->pipe; | ||
11830 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | ||
11831 | struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); | ||
11832 | int ret; | 11745 | int ret; |
11833 | 11746 | ||
11834 | intel_crtc_wait_for_pending_flips(crtc); | 11747 | ret = drm_plane_helper_check_update(plane, crtc, fb, |
11748 | src, dest, clip, | ||
11749 | DRM_PLANE_HELPER_NO_SCALING, | ||
11750 | DRM_PLANE_HELPER_NO_SCALING, | ||
11751 | false, true, &state->visible); | ||
11752 | if (ret) | ||
11753 | return ret; | ||
11835 | 11754 | ||
11755 | intel_crtc_wait_for_pending_flips(crtc); | ||
11836 | if (intel_crtc_has_pending_flip(crtc)) { | 11756 | if (intel_crtc_has_pending_flip(crtc)) { |
11837 | DRM_ERROR("pipe is still busy with an old pageflip\n"); | 11757 | DRM_ERROR("pipe is still busy with an old pageflip\n"); |
11838 | return -EBUSY; | 11758 | return -EBUSY; |
11839 | } | 11759 | } |
11840 | 11760 | ||
11841 | if (old_obj != obj) { | ||
11842 | mutex_lock(&dev->struct_mutex); | ||
11843 | ret = intel_pin_and_fence_fb_obj(plane, fb, NULL); | ||
11844 | if (ret == 0) | ||
11845 | i915_gem_track_fb(old_obj, obj, | ||
11846 | INTEL_FRONTBUFFER_PRIMARY(pipe)); | ||
11847 | mutex_unlock(&dev->struct_mutex); | ||
11848 | if (ret != 0) { | ||
11849 | DRM_DEBUG_KMS("pin & fence failed\n"); | ||
11850 | return ret; | ||
11851 | } | ||
11852 | } | ||
11853 | |||
11854 | return 0; | 11761 | return 0; |
11855 | } | 11762 | } |
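
DRM_PLANE_HELPER_NO_SCALING passed for both the minimum and maximum scale pins the helper to 1:1. The constant is simply 1.0 in the same 16.16 fixed-point format as the source rectangle:

    /* from drm_plane_helper.h: 1.0 in 16.16 fixed point */
    #define DRM_PLANE_HELPER_NO_SCALING (1 << 16)

Since the helper computes the scale factor as src size over dest size in 16.16, clamping both bounds to 1 << 16 rejects any src/dest size mismatch for the primary plane.
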
11856 | 11763 | ||
@@ -11858,19 +11765,28 @@ static void | |||
11858 | intel_commit_primary_plane(struct drm_plane *plane, | 11765 | intel_commit_primary_plane(struct drm_plane *plane, |
11859 | struct intel_plane_state *state) | 11766 | struct intel_plane_state *state) |
11860 | { | 11767 | { |
11861 | struct drm_crtc *crtc = state->crtc; | 11768 | struct drm_crtc *crtc = state->base.crtc; |
11862 | struct drm_framebuffer *fb = state->fb; | 11769 | struct drm_framebuffer *fb = state->base.fb; |
11863 | struct drm_device *dev = crtc->dev; | 11770 | struct drm_device *dev = plane->dev; |
11864 | struct drm_i915_private *dev_priv = dev->dev_private; | 11771 | struct drm_i915_private *dev_priv = dev->dev_private; |
11865 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 11772 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
11866 | enum pipe pipe = intel_crtc->pipe; | ||
11867 | struct drm_framebuffer *old_fb = plane->fb; | ||
11868 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | 11773 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); |
11869 | struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); | ||
11870 | struct intel_plane *intel_plane = to_intel_plane(plane); | 11774 | struct intel_plane *intel_plane = to_intel_plane(plane); |
11871 | struct drm_rect *src = &state->src; | 11775 | struct drm_rect *src = &state->src; |
11776 | enum pipe pipe = intel_plane->pipe; | ||
11872 | 11777 | ||
11873 | crtc->primary->fb = fb; | 11778 | if (!fb) { |
11779 | /* | ||
11780 | * 'prepare' is never called when plane is being disabled, so | ||
11781 | * we need to handle frontbuffer tracking here | ||
11782 | */ | ||
11783 | mutex_lock(&dev->struct_mutex); | ||
11784 | i915_gem_track_fb(intel_fb_obj(plane->fb), NULL, | ||
11785 | INTEL_FRONTBUFFER_PRIMARY(pipe)); | ||
11786 | mutex_unlock(&dev->struct_mutex); | ||
11787 | } | ||
11788 | |||
11789 | plane->fb = fb; | ||
11874 | crtc->x = src->x1 >> 16; | 11790 | crtc->x = src->x1 >> 16; |
11875 | crtc->y = src->y1 >> 16; | 11791 | crtc->y = src->y1 >> 16; |
11876 | 11792 | ||
@@ -11899,7 +11815,7 @@ intel_commit_primary_plane(struct drm_plane *plane, | |||
11899 | INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) && | 11815 | INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) && |
11900 | dev_priv->fbc.plane == intel_crtc->plane && | 11816 | dev_priv->fbc.plane == intel_crtc->plane && |
11901 | intel_plane->rotation != BIT(DRM_ROTATE_0)) { | 11817 | intel_plane->rotation != BIT(DRM_ROTATE_0)) { |
11902 | intel_disable_fbc(dev); | 11818 | intel_fbc_disable(dev); |
11903 | } | 11819 | } |
11904 | 11820 | ||
11905 | if (state->visible) { | 11821 | if (state->visible) { |
@@ -11934,33 +11850,28 @@ intel_commit_primary_plane(struct drm_plane *plane, | |||
11934 | intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); | 11850 | intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); |
11935 | 11851 | ||
11936 | mutex_lock(&dev->struct_mutex); | 11852 | mutex_lock(&dev->struct_mutex); |
11937 | intel_update_fbc(dev); | 11853 | intel_fbc_update(dev); |
11938 | mutex_unlock(&dev->struct_mutex); | ||
11939 | } | ||
11940 | |||
11941 | if (old_fb && old_fb != fb) { | ||
11942 | if (intel_crtc->active) | ||
11943 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
11944 | |||
11945 | mutex_lock(&dev->struct_mutex); | ||
11946 | intel_unpin_fb_obj(old_obj); | ||
11947 | mutex_unlock(&dev->struct_mutex); | 11854 | mutex_unlock(&dev->struct_mutex); |
11948 | } | 11855 | } |
11949 | } | 11856 | } |
11950 | 11857 | ||
11951 | static int | 11858 | int |
11952 | intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc, | 11859 | intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, |
11953 | struct drm_framebuffer *fb, int crtc_x, int crtc_y, | 11860 | struct drm_framebuffer *fb, int crtc_x, int crtc_y, |
11954 | unsigned int crtc_w, unsigned int crtc_h, | 11861 | unsigned int crtc_w, unsigned int crtc_h, |
11955 | uint32_t src_x, uint32_t src_y, | 11862 | uint32_t src_x, uint32_t src_y, |
11956 | uint32_t src_w, uint32_t src_h) | 11863 | uint32_t src_w, uint32_t src_h) |
11957 | { | 11864 | { |
11865 | struct drm_device *dev = plane->dev; | ||
11866 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
11867 | struct drm_framebuffer *old_fb = plane->fb; | ||
11958 | struct intel_plane_state state; | 11868 | struct intel_plane_state state; |
11869 | struct intel_plane *intel_plane = to_intel_plane(plane); | ||
11959 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 11870 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
11960 | int ret; | 11871 | int ret; |
11961 | 11872 | ||
11962 | state.crtc = crtc; | 11873 | state.base.crtc = crtc ? crtc : plane->crtc; |
11963 | state.fb = fb; | 11874 | state.base.fb = fb; |
11964 | 11875 | ||
11965 | /* sample coordinates in 16.16 fixed point */ | 11876 | /* sample coordinates in 16.16 fixed point */ |
11966 | state.src.x1 = src_x; | 11877 | state.src.x1 = src_x; |
@@ -11982,19 +11893,50 @@ intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
11982 | state.orig_src = state.src; | 11893 | state.orig_src = state.src; |
11983 | state.orig_dst = state.dst; | 11894 | state.orig_dst = state.dst; |
11984 | 11895 | ||
11985 | ret = intel_check_primary_plane(plane, &state); | 11896 | ret = intel_plane->check_plane(plane, &state); |
11986 | if (ret) | 11897 | if (ret) |
11987 | return ret; | 11898 | return ret; |
11988 | 11899 | ||
11989 | ret = intel_prepare_primary_plane(plane, &state); | 11900 | if (fb != old_fb && fb) { |
11990 | if (ret) | 11901 | ret = intel_prepare_plane_fb(plane, fb); |
11991 | return ret; | 11902 | if (ret) |
11903 | return ret; | ||
11904 | } | ||
11905 | |||
11906 | intel_runtime_pm_get(dev_priv); | ||
11907 | intel_plane->commit_plane(plane, &state); | ||
11908 | intel_runtime_pm_put(dev_priv); | ||
11909 | |||
11910 | if (fb != old_fb && old_fb) { | ||
11911 | if (intel_crtc->active) | ||
11912 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
11913 | intel_cleanup_plane_fb(plane, old_fb); | ||
11914 | } | ||
11992 | 11915 | ||
11993 | intel_commit_primary_plane(plane, &state); | 11916 | plane->fb = fb; |
11994 | 11917 | ||
11995 | return 0; | 11918 | return 0; |
11996 | } | 11919 | } |
11997 | 11920 | ||
11921 | /** | ||
11922 | * intel_disable_plane - disable a plane | ||
11923 | * @plane: plane to disable | ||
11924 | * | ||
11925 | * General disable handler for all plane types. | ||
11926 | */ | ||
11927 | int | ||
11928 | intel_disable_plane(struct drm_plane *plane) | ||
11929 | { | ||
11930 | if (!plane->fb) | ||
11931 | return 0; | ||
11932 | |||
11933 | if (WARN_ON(!plane->crtc)) | ||
11934 | return -EINVAL; | ||
11935 | |||
11936 | return plane->funcs->update_plane(plane, plane->crtc, NULL, | ||
11937 | 0, 0, 0, 0, 0, 0, 0, 0); | ||
11938 | } | ||
11939 | |||
11998 | /* Common destruction function for both primary and cursor planes */ | 11940 | /* Common destruction function for both primary and cursor planes */ |
11999 | static void intel_plane_destroy(struct drm_plane *plane) | 11941 | static void intel_plane_destroy(struct drm_plane *plane) |
12000 | { | 11942 | { |
@@ -12004,8 +11946,8 @@ static void intel_plane_destroy(struct drm_plane *plane) | |||
12004 | } | 11946 | } |
12005 | 11947 | ||
12006 | static const struct drm_plane_funcs intel_primary_plane_funcs = { | 11948 | static const struct drm_plane_funcs intel_primary_plane_funcs = { |
12007 | .update_plane = intel_primary_plane_setplane, | 11949 | .update_plane = intel_update_plane, |
12008 | .disable_plane = intel_primary_plane_disable, | 11950 | .disable_plane = intel_disable_plane, |
12009 | .destroy = intel_plane_destroy, | 11951 | .destroy = intel_plane_destroy, |
12010 | .set_property = intel_plane_set_property | 11952 | .set_property = intel_plane_set_property |
12011 | }; | 11953 | }; |
@@ -12026,6 +11968,8 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, | |||
12026 | primary->pipe = pipe; | 11968 | primary->pipe = pipe; |
12027 | primary->plane = pipe; | 11969 | primary->plane = pipe; |
12028 | primary->rotation = BIT(DRM_ROTATE_0); | 11970 | primary->rotation = BIT(DRM_ROTATE_0); |
11971 | primary->check_plane = intel_check_primary_plane; | ||
11972 | primary->commit_plane = intel_commit_primary_plane; | ||
12029 | if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) | 11973 | if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) |
12030 | primary->plane = !pipe; | 11974 | primary->plane = !pipe; |
12031 | 11975 | ||
@@ -12058,23 +12002,12 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, | |||
12058 | } | 12002 | } |
12059 | 12003 | ||
12060 | static int | 12004 | static int |
12061 | intel_cursor_plane_disable(struct drm_plane *plane) | ||
12062 | { | ||
12063 | if (!plane->fb) | ||
12064 | return 0; | ||
12065 | |||
12066 | BUG_ON(!plane->crtc); | ||
12067 | |||
12068 | return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0); | ||
12069 | } | ||
12070 | |||
12071 | static int | ||
12072 | intel_check_cursor_plane(struct drm_plane *plane, | 12005 | intel_check_cursor_plane(struct drm_plane *plane, |
12073 | struct intel_plane_state *state) | 12006 | struct intel_plane_state *state) |
12074 | { | 12007 | { |
12075 | struct drm_crtc *crtc = state->crtc; | 12008 | struct drm_crtc *crtc = state->base.crtc; |
12076 | struct drm_device *dev = crtc->dev; | 12009 | struct drm_device *dev = crtc->dev; |
12077 | struct drm_framebuffer *fb = state->fb; | 12010 | struct drm_framebuffer *fb = state->base.fb; |
12078 | struct drm_rect *dest = &state->dst; | 12011 | struct drm_rect *dest = &state->dst; |
12079 | struct drm_rect *src = &state->src; | 12012 | struct drm_rect *src = &state->src; |
12080 | const struct drm_rect *clip = &state->clip; | 12013 | const struct drm_rect *clip = &state->clip; |
@@ -12124,18 +12057,21 @@ intel_check_cursor_plane(struct drm_plane *plane, | |||
12124 | return ret; | 12057 | return ret; |
12125 | } | 12058 | } |
12126 | 12059 | ||
12127 | static int | 12060 | static void |
12128 | intel_commit_cursor_plane(struct drm_plane *plane, | 12061 | intel_commit_cursor_plane(struct drm_plane *plane, |
12129 | struct intel_plane_state *state) | 12062 | struct intel_plane_state *state) |
12130 | { | 12063 | { |
12131 | struct drm_crtc *crtc = state->crtc; | 12064 | struct drm_crtc *crtc = state->base.crtc; |
12132 | struct drm_framebuffer *fb = state->fb; | 12065 | struct drm_device *dev = crtc->dev; |
12133 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 12066 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
12134 | struct intel_plane *intel_plane = to_intel_plane(plane); | 12067 | struct intel_plane *intel_plane = to_intel_plane(plane); |
12135 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | 12068 | struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb); |
12136 | struct drm_i915_gem_object *obj = intel_fb->obj; | 12069 | struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); |
12137 | int crtc_w, crtc_h; | 12070 | enum pipe pipe = intel_crtc->pipe; |
12071 | unsigned old_width; | ||
12072 | uint32_t addr; | ||
12138 | 12073 | ||
12074 | plane->fb = state->base.fb; | ||
12139 | crtc->cursor_x = state->orig_dst.x1; | 12075 | crtc->cursor_x = state->orig_dst.x1; |
12140 | crtc->cursor_y = state->orig_dst.y1; | 12076 | crtc->cursor_y = state->orig_dst.y1; |
12141 | 12077 | ||
@@ -12149,64 +12085,47 @@ intel_commit_cursor_plane(struct drm_plane *plane, | |||
12149 | intel_plane->src_h = drm_rect_height(&state->orig_src); | 12085 | intel_plane->src_h = drm_rect_height(&state->orig_src); |
12150 | intel_plane->obj = obj; | 12086 | intel_plane->obj = obj; |
12151 | 12087 | ||
12152 | if (fb != crtc->cursor->fb) { | 12088 | if (intel_crtc->cursor_bo == obj) |
12153 | crtc_w = drm_rect_width(&state->orig_dst); | 12089 | goto update; |
12154 | crtc_h = drm_rect_height(&state->orig_dst); | ||
12155 | return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h); | ||
12156 | } else { | ||
12157 | intel_crtc_update_cursor(crtc, state->visible); | ||
12158 | |||
12159 | intel_frontbuffer_flip(crtc->dev, | ||
12160 | INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe)); | ||
12161 | 12090 | ||
12162 | return 0; | 12091 | /* |
12092 | * 'prepare' is only called when fb != NULL; we still need to update | ||
12093 | * frontbuffer tracking for the 'disable' case here. | ||
12094 | */ | ||
12095 | if (!obj) { | ||
12096 | mutex_lock(&dev->struct_mutex); | ||
12097 | i915_gem_track_fb(old_obj, NULL, | ||
12098 | INTEL_FRONTBUFFER_CURSOR(pipe)); | ||
12099 | mutex_unlock(&dev->struct_mutex); | ||
12163 | } | 12100 | } |
12164 | } | ||
12165 | |||
12166 | static int | ||
12167 | intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, | ||
12168 | struct drm_framebuffer *fb, int crtc_x, int crtc_y, | ||
12169 | unsigned int crtc_w, unsigned int crtc_h, | ||
12170 | uint32_t src_x, uint32_t src_y, | ||
12171 | uint32_t src_w, uint32_t src_h) | ||
12172 | { | ||
12173 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
12174 | struct intel_plane_state state; | ||
12175 | int ret; | ||
12176 | |||
12177 | state.crtc = crtc; | ||
12178 | state.fb = fb; | ||
12179 | 12101 | ||
12180 | /* sample coordinates in 16.16 fixed point */ | 12102 | if (!obj) |
12181 | state.src.x1 = src_x; | 12103 | addr = 0; |
12182 | state.src.x2 = src_x + src_w; | 12104 | else if (!INTEL_INFO(dev)->cursor_needs_physical) |
12183 | state.src.y1 = src_y; | 12105 | addr = i915_gem_obj_ggtt_offset(obj); |
12184 | state.src.y2 = src_y + src_h; | 12106 | else |
12185 | 12107 | addr = obj->phys_handle->busaddr; | |
12186 | /* integer pixels */ | ||
12187 | state.dst.x1 = crtc_x; | ||
12188 | state.dst.x2 = crtc_x + crtc_w; | ||
12189 | state.dst.y1 = crtc_y; | ||
12190 | state.dst.y2 = crtc_y + crtc_h; | ||
12191 | 12108 | ||
12192 | state.clip.x1 = 0; | 12109 | intel_crtc->cursor_addr = addr; |
12193 | state.clip.y1 = 0; | 12110 | intel_crtc->cursor_bo = obj; |
12194 | state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0; | 12111 | update: |
12195 | state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0; | 12112 | old_width = intel_crtc->cursor_width; |
12196 | 12113 | ||
12197 | state.orig_src = state.src; | 12114 | intel_crtc->cursor_width = drm_rect_width(&state->orig_dst); |
12198 | state.orig_dst = state.dst; | 12115 | intel_crtc->cursor_height = drm_rect_height(&state->orig_dst); |
12199 | 12116 | ||
12200 | ret = intel_check_cursor_plane(plane, &state); | 12117 | if (intel_crtc->active) { |
12201 | if (ret) | 12118 | if (old_width != intel_crtc->cursor_width) |
12202 | return ret; | 12119 | intel_update_watermarks(crtc); |
12120 | intel_crtc_update_cursor(crtc, state->visible); | ||
12203 | 12121 | ||
12204 | return intel_commit_cursor_plane(plane, &state); | 12122 | intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe)); |
12123 | } | ||
12205 | } | 12124 | } |
12206 | 12125 | ||
12207 | static const struct drm_plane_funcs intel_cursor_plane_funcs = { | 12126 | static const struct drm_plane_funcs intel_cursor_plane_funcs = { |
12208 | .update_plane = intel_cursor_plane_update, | 12127 | .update_plane = intel_update_plane, |
12209 | .disable_plane = intel_cursor_plane_disable, | 12128 | .disable_plane = intel_disable_plane, |
12210 | .destroy = intel_plane_destroy, | 12129 | .destroy = intel_plane_destroy, |
12211 | .set_property = intel_plane_set_property, | 12130 | .set_property = intel_plane_set_property, |
12212 | }; | 12131 | }; |
@@ -12225,6 +12144,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, | |||
12225 | cursor->pipe = pipe; | 12144 | cursor->pipe = pipe; |
12226 | cursor->plane = pipe; | 12145 | cursor->plane = pipe; |
12227 | cursor->rotation = BIT(DRM_ROTATE_0); | 12146 | cursor->rotation = BIT(DRM_ROTATE_0); |
12147 | cursor->check_plane = intel_check_cursor_plane; | ||
12148 | cursor->commit_plane = intel_commit_cursor_plane; | ||
12228 | 12149 | ||
12229 | drm_universal_plane_init(dev, &cursor->base, 0, | 12150 | drm_universal_plane_init(dev, &cursor->base, 0, |
12230 | &intel_cursor_plane_funcs, | 12151 | &intel_cursor_plane_funcs, |
@@ -12383,28 +12304,6 @@ static bool has_edp_a(struct drm_device *dev) | |||
12383 | return true; | 12304 | return true; |
12384 | } | 12305 | } |
12385 | 12306 | ||
12386 | const char *intel_output_name(int output) | ||
12387 | { | ||
12388 | static const char *names[] = { | ||
12389 | [INTEL_OUTPUT_UNUSED] = "Unused", | ||
12390 | [INTEL_OUTPUT_ANALOG] = "Analog", | ||
12391 | [INTEL_OUTPUT_DVO] = "DVO", | ||
12392 | [INTEL_OUTPUT_SDVO] = "SDVO", | ||
12393 | [INTEL_OUTPUT_LVDS] = "LVDS", | ||
12394 | [INTEL_OUTPUT_TVOUT] = "TV", | ||
12395 | [INTEL_OUTPUT_HDMI] = "HDMI", | ||
12396 | [INTEL_OUTPUT_DISPLAYPORT] = "DisplayPort", | ||
12397 | [INTEL_OUTPUT_EDP] = "eDP", | ||
12398 | [INTEL_OUTPUT_DSI] = "DSI", | ||
12399 | [INTEL_OUTPUT_UNKNOWN] = "Unknown", | ||
12400 | }; | ||
12401 | |||
12402 | if (output < 0 || output >= ARRAY_SIZE(names) || !names[output]) | ||
12403 | return "Invalid"; | ||
12404 | |||
12405 | return names[output]; | ||
12406 | } | ||
12407 | |||
12408 | static bool intel_crt_present(struct drm_device *dev) | 12307 | static bool intel_crt_present(struct drm_device *dev) |
12409 | { | 12308 | { |
12410 | struct drm_i915_private *dev_priv = dev->dev_private; | 12309 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -13153,7 +13052,7 @@ void intel_modeset_init(struct drm_device *dev) | |||
13153 | intel_setup_outputs(dev); | 13052 | intel_setup_outputs(dev); |
13154 | 13053 | ||
13155 | /* Just in case the BIOS is doing something questionable. */ | 13054 | /* Just in case the BIOS is doing something questionable. */ |
13156 | intel_disable_fbc(dev); | 13055 | intel_fbc_disable(dev); |
13157 | 13056 | ||
13158 | drm_modeset_lock_all(dev); | 13057 | drm_modeset_lock_all(dev); |
13159 | intel_modeset_setup_hw_state(dev, false); | 13058 | intel_modeset_setup_hw_state(dev, false); |
@@ -13670,7 +13569,7 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
13670 | 13569 | ||
13671 | intel_unregister_dsm_handler(); | 13570 | intel_unregister_dsm_handler(); |
13672 | 13571 | ||
13673 | intel_disable_fbc(dev); | 13572 | intel_fbc_disable(dev); |
13674 | 13573 | ||
13675 | ironlake_teardown_rc6(dev); | 13574 | ironlake_teardown_rc6(dev); |
13676 | 13575 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 5cecc20efa71..88d81a8b0d35 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -1558,7 +1558,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) | |||
1558 | vdd = edp_panel_vdd_on(intel_dp); | 1558 | vdd = edp_panel_vdd_on(intel_dp); |
1559 | pps_unlock(intel_dp); | 1559 | pps_unlock(intel_dp); |
1560 | 1560 | ||
1561 | WARN(!vdd, "eDP port %c VDD already requested on\n", | 1561 | I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n", |
1562 | port_name(dp_to_dig_port(intel_dp)->port)); | 1562 | port_name(dp_to_dig_port(intel_dp)->port)); |
1563 | } | 1563 | } |
1564 | 1564 | ||
@@ -1642,7 +1642,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) | |||
1642 | if (!is_edp(intel_dp)) | 1642 | if (!is_edp(intel_dp)) |
1643 | return; | 1643 | return; |
1644 | 1644 | ||
1645 | WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on", | 1645 | I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on", |
1646 | port_name(dp_to_dig_port(intel_dp)->port)); | 1646 | port_name(dp_to_dig_port(intel_dp)->port)); |
1647 | 1647 | ||
1648 | intel_dp->want_panel_vdd = false; | 1648 | intel_dp->want_panel_vdd = false; |
@@ -2105,6 +2105,9 @@ static void intel_disable_dp(struct intel_encoder *encoder) | |||
2105 | if (crtc->config.has_audio) | 2105 | if (crtc->config.has_audio) |
2106 | intel_audio_codec_disable(encoder); | 2106 | intel_audio_codec_disable(encoder); |
2107 | 2107 | ||
2108 | if (HAS_PSR(dev) && !HAS_DDI(dev)) | ||
2109 | intel_psr_disable(intel_dp); | ||
2110 | |||
2108 | /* Make sure the panel is off before trying to change the mode. But also | 2111 | /* Make sure the panel is off before trying to change the mode. But also |
2109 | * ensure that we have vdd while we switch off the panel. */ | 2112 | * ensure that we have vdd while we switch off the panel. */ |
2110 | intel_edp_panel_vdd_on(intel_dp); | 2113 | intel_edp_panel_vdd_on(intel_dp); |
@@ -2329,6 +2332,7 @@ static void vlv_enable_dp(struct intel_encoder *encoder) | |||
2329 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | 2332 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
2330 | 2333 | ||
2331 | intel_edp_backlight_on(intel_dp); | 2334 | intel_edp_backlight_on(intel_dp); |
2335 | intel_psr_enable(intel_dp); | ||
2332 | } | 2336 | } |
2333 | 2337 | ||
2334 | static void g4x_pre_enable_dp(struct intel_encoder *encoder) | 2338 | static void g4x_pre_enable_dp(struct intel_encoder *encoder) |
@@ -4306,7 +4310,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) | |||
4306 | 4310 | ||
4307 | drm_dp_aux_unregister(&intel_dp->aux); | 4311 | drm_dp_aux_unregister(&intel_dp->aux); |
4308 | intel_dp_mst_encoder_cleanup(intel_dig_port); | 4312 | intel_dp_mst_encoder_cleanup(intel_dig_port); |
4309 | drm_encoder_cleanup(encoder); | ||
4310 | if (is_edp(intel_dp)) { | 4313 | if (is_edp(intel_dp)) { |
4311 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); | 4314 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); |
4312 | /* | 4315 | /* |
@@ -4322,6 +4325,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) | |||
4322 | intel_dp->edp_notifier.notifier_call = NULL; | 4325 | intel_dp->edp_notifier.notifier_call = NULL; |
4323 | } | 4326 | } |
4324 | } | 4327 | } |
4328 | drm_encoder_cleanup(encoder); | ||
4325 | kfree(intel_dig_port); | 4329 | kfree(intel_dig_port); |
4326 | } | 4330 | } |
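Moving drm_encoder_cleanup() below the eDP teardown matters for ordering: panel_vdd_work can still fire and dereference encoder state, so it must be cancelled while the encoder is intact. The general pattern, as a hedged sketch with hypothetical names:

    /* Illustrative teardown ordering only; the struct and names are
     * hypothetical. */
    struct foo_digital_port {
    	struct delayed_work panel_vdd_work;	/* may touch the encoder */
    	struct drm_encoder encoder;
    };

    static void foo_encoder_destroy(struct foo_digital_port *port)
    {
    	/* quiesce everything that can still reach the encoder ... */
    	cancel_delayed_work_sync(&port->panel_vdd_work);
    	/* ... and only then tear it down and free its container */
    	drm_encoder_cleanup(&port->encoder);
    	kfree(port);
    }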
4327 | 4331 | ||
@@ -4763,14 +4767,9 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) | |||
4763 | } | 4767 | } |
4764 | 4768 | ||
4765 | /* | 4769 | /* |
4766 | * FIXME: This needs proper synchronization with psr state. But really | 4770 | * FIXME: This needs proper synchronization with psr state for some |
4767 | * hard to tell without seeing the user of this function of this code. | 4771 | * platforms that cannot have PSR and DRRS enabled at the same time. |
4768 | * Check locking and ordering once that lands. | ||
4769 | */ | 4772 | */ |
4770 | if (INTEL_INFO(dev)->gen < 8 && intel_psr_is_enabled(dev)) { | ||
4771 | DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n"); | ||
4772 | return; | ||
4773 | } | ||
4774 | 4773 | ||
4775 | encoder = intel_attached_encoder(&intel_connector->base); | 4774 | encoder = intel_attached_encoder(&intel_connector->base); |
4776 | intel_dp = enc_to_intel_dp(&encoder->base); | 4775 | intel_dp = enc_to_intel_dp(&encoder->base); |
@@ -5086,7 +5085,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
5086 | intel_dp_aux_init(intel_dp, intel_connector); | 5085 | intel_dp_aux_init(intel_dp, intel_connector); |
5087 | 5086 | ||
5088 | /* init MST on ports that can support it */ | 5087 | /* init MST on ports that can support it */ |
5089 | if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { | 5088 | if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) { |
5090 | if (port == PORT_B || port == PORT_C || port == PORT_D) { | 5089 | if (port == PORT_B || port == PORT_C || port == PORT_D) { |
5091 | intel_dp_mst_encoder_init(intel_dig_port, | 5090 | intel_dp_mst_encoder_init(intel_dig_port, |
5092 | intel_connector->base.base.id); | 5091 | intel_connector->base.base.id); |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 25fdbb16d4e0..588b618ab668 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -244,8 +244,7 @@ typedef struct dpll { | |||
244 | } intel_clock_t; | 244 | } intel_clock_t; |
245 | 245 | ||
246 | struct intel_plane_state { | 246 | struct intel_plane_state { |
247 | struct drm_crtc *crtc; | 247 | struct drm_plane_state base; |
248 | struct drm_framebuffer *fb; | ||
249 | struct drm_rect src; | 248 | struct drm_rect src; |
250 | struct drm_rect dst; | 249 | struct drm_rect dst; |
251 | struct drm_rect clip; | 250 | struct drm_rect clip; |
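With drm_plane_state embedded as the first member, per-plane state can be passed around as the base type and upcast where i915 specifics are needed. The customary helper for such an embedding looks like this (a sketch; the driver presumably adds an equivalent accessor):

    /* Sketch of the usual container_of() upcast for an embedded base. */
    static inline struct intel_plane_state *
    to_intel_plane_state(struct drm_plane_state *state)
    {
    	return container_of(state, struct intel_plane_state, base);
    }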
@@ -406,8 +405,7 @@ struct intel_pipe_wm { | |||
406 | }; | 405 | }; |
407 | 406 | ||
408 | struct intel_mmio_flip { | 407 | struct intel_mmio_flip { |
409 | u32 seqno; | 408 | struct drm_i915_gem_request *req; |
410 | struct intel_engine_cs *ring; | ||
411 | struct work_struct work; | 409 | struct work_struct work; |
412 | }; | 410 | }; |
413 | 411 | ||
@@ -510,6 +508,10 @@ struct intel_plane { | |||
510 | uint32_t src_w, uint32_t src_h); | 508 | uint32_t src_w, uint32_t src_h); |
511 | void (*disable_plane)(struct drm_plane *plane, | 509 | void (*disable_plane)(struct drm_plane *plane, |
512 | struct drm_crtc *crtc); | 510 | struct drm_crtc *crtc); |
511 | int (*check_plane)(struct drm_plane *plane, | ||
512 | struct intel_plane_state *state); | ||
513 | void (*commit_plane)(struct drm_plane *plane, | ||
514 | struct intel_plane_state *state); | ||
513 | int (*update_colorkey)(struct drm_plane *plane, | 515 | int (*update_colorkey)(struct drm_plane *plane, |
514 | struct drm_intel_sprite_colorkey *key); | 516 | struct drm_intel_sprite_colorkey *key); |
515 | void (*get_colorkey)(struct drm_plane *plane, | 517 | void (*get_colorkey)(struct drm_plane *plane, |
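The new check_plane/commit_plane hooks split a plane update into a fallible validation phase and a commit phase that is not allowed to fail, the transactional shape the atomic plane helpers expect. A simplified sketch of how a shared entry point such as intel_update_plane() might drive the two hooks (the real function also computes the src/dst/clip rectangles and takes the full update_plane parameter list):

    static int plane_update_sketch(struct drm_plane *plane,
    			       struct intel_plane_state *state)
    {
    	struct intel_plane *intel_plane = to_intel_plane(plane);
    	int ret;

    	ret = intel_plane->check_plane(plane, state);	/* may fail, no HW touched */
    	if (ret)
    		return ret;

    	intel_plane->commit_plane(plane, state);	/* point of no return */
    	return 0;
    }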
@@ -708,8 +710,7 @@ struct intel_unpin_work { | |||
708 | #define INTEL_FLIP_COMPLETE 2 | 710 | #define INTEL_FLIP_COMPLETE 2 |
709 | u32 flip_count; | 711 | u32 flip_count; |
710 | u32 gtt_offset; | 712 | u32 gtt_offset; |
711 | struct intel_engine_cs *flip_queued_ring; | 713 | struct drm_i915_gem_request *flip_queued_req; |
712 | u32 flip_queued_seqno; | ||
713 | int flip_queued_vblank; | 714 | int flip_queued_vblank; |
714 | int flip_ready_vblank; | 715 | int flip_ready_vblank; |
715 | bool enable_stall_check; | 716 | bool enable_stall_check; |
@@ -874,7 +875,6 @@ void intel_audio_codec_enable(struct intel_encoder *encoder); | |||
874 | void intel_audio_codec_disable(struct intel_encoder *encoder); | 875 | void intel_audio_codec_disable(struct intel_encoder *encoder); |
875 | 876 | ||
876 | /* intel_display.c */ | 877 | /* intel_display.c */ |
877 | const char *intel_output_name(int output); | ||
878 | bool intel_has_pending_fb_unpin(struct drm_device *dev); | 878 | bool intel_has_pending_fb_unpin(struct drm_device *dev); |
879 | int intel_pch_rawclk(struct drm_device *dev); | 879 | int intel_pch_rawclk(struct drm_device *dev); |
880 | void intel_mark_busy(struct drm_device *dev); | 880 | void intel_mark_busy(struct drm_device *dev); |
@@ -925,6 +925,10 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane); | |||
925 | void intel_finish_page_flip(struct drm_device *dev, int pipe); | 925 | void intel_finish_page_flip(struct drm_device *dev, int pipe); |
926 | void intel_finish_page_flip_plane(struct drm_device *dev, int plane); | 926 | void intel_finish_page_flip_plane(struct drm_device *dev, int plane); |
927 | void intel_check_page_flip(struct drm_device *dev, int pipe); | 927 | void intel_check_page_flip(struct drm_device *dev, int pipe); |
928 | int intel_prepare_plane_fb(struct drm_plane *plane, | ||
929 | struct drm_framebuffer *fb); | ||
930 | void intel_cleanup_plane_fb(struct drm_plane *plane, | ||
931 | struct drm_framebuffer *fb); | ||
928 | 932 | ||
929 | /* shared dpll functions */ | 933 | /* shared dpll functions */ |
930 | struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc); | 934 | struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc); |
@@ -1010,6 +1014,12 @@ void intel_dp_hot_plug(struct intel_encoder *intel_encoder); | |||
1010 | void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv); | 1014 | void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv); |
1011 | uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes); | 1015 | uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes); |
1012 | void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes); | 1016 | void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes); |
1017 | int intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | ||
1018 | struct drm_framebuffer *fb, int crtc_x, int crtc_y, | ||
1019 | unsigned int crtc_w, unsigned int crtc_h, | ||
1020 | uint32_t src_x, uint32_t src_y, | ||
1021 | uint32_t src_w, uint32_t src_h); | ||
1022 | int intel_disable_plane(struct drm_plane *plane); | ||
1013 | 1023 | ||
1014 | /* intel_dp_mst.c */ | 1024 | /* intel_dp_mst.c */ |
1015 | int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); | 1025 | int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); |
@@ -1053,6 +1063,13 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev) | |||
1053 | } | 1063 | } |
1054 | #endif | 1064 | #endif |
1055 | 1065 | ||
1066 | /* intel_fbc.c */ | ||
1067 | bool intel_fbc_enabled(struct drm_device *dev); | ||
1068 | void intel_fbc_update(struct drm_device *dev); | ||
1069 | void intel_fbc_init(struct drm_i915_private *dev_priv); | ||
1070 | void intel_fbc_disable(struct drm_device *dev); | ||
1071 | void bdw_fbc_sw_flush(struct drm_device *dev, u32 value); | ||
1072 | |||
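The frame buffer compression prototypes collected here correspond to FBC moving out of intel_pm.c into its own file. The intended call order over the driver's lifetime, inferred from the call sites in this patch:

    /* Rough lifecycle, inferred from the call sites in this patch:
     *
     *   intel_fbc_init(dev_priv);   - once, at driver load
     *   intel_fbc_update(dev);      - re-evaluate FBC after plane/mode changes
     *   intel_fbc_disable(dev);     - modeset init (undo BIOS state) and
     *                                 again at modeset cleanup
     */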
1056 | /* intel_hdmi.c */ | 1073 | /* intel_hdmi.c */ |
1057 | void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port); | 1074 | void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port); |
1058 | void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | 1075 | void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, |
@@ -1083,6 +1100,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, | |||
1083 | struct drm_file *file_priv); | 1100 | struct drm_file *file_priv); |
1084 | int intel_overlay_attrs(struct drm_device *dev, void *data, | 1101 | int intel_overlay_attrs(struct drm_device *dev, void *data, |
1085 | struct drm_file *file_priv); | 1102 | struct drm_file *file_priv); |
1103 | void intel_overlay_reset(struct drm_i915_private *dev_priv); | ||
1086 | 1104 | ||
1087 | 1105 | ||
1088 | /* intel_panel.c */ | 1106 | /* intel_panel.c */ |
@@ -1115,7 +1133,6 @@ void intel_backlight_unregister(struct drm_device *dev); | |||
1115 | 1133 | ||
1116 | 1134 | ||
1117 | /* intel_psr.c */ | 1135 | /* intel_psr.c */ |
1118 | bool intel_psr_is_enabled(struct drm_device *dev); | ||
1119 | void intel_psr_enable(struct intel_dp *intel_dp); | 1136 | void intel_psr_enable(struct intel_dp *intel_dp); |
1120 | void intel_psr_disable(struct intel_dp *intel_dp); | 1137 | void intel_psr_disable(struct intel_dp *intel_dp); |
1121 | void intel_psr_invalidate(struct drm_device *dev, | 1138 | void intel_psr_invalidate(struct drm_device *dev, |
@@ -1159,8 +1176,6 @@ void intel_update_sprite_watermarks(struct drm_plane *plane, | |||
1159 | bool enabled, bool scaled); | 1176 | bool enabled, bool scaled); |
1160 | void intel_init_pm(struct drm_device *dev); | 1177 | void intel_init_pm(struct drm_device *dev); |
1161 | void intel_pm_setup(struct drm_device *dev); | 1178 | void intel_pm_setup(struct drm_device *dev); |
1162 | bool intel_fbc_enabled(struct drm_device *dev); | ||
1163 | void intel_update_fbc(struct drm_device *dev); | ||
1164 | void intel_gpu_ips_init(struct drm_i915_private *dev_priv); | 1179 | void intel_gpu_ips_init(struct drm_i915_private *dev_priv); |
1165 | void intel_gpu_ips_teardown(void); | 1180 | void intel_gpu_ips_teardown(void); |
1166 | void intel_init_gt_powersave(struct drm_device *dev); | 1181 | void intel_init_gt_powersave(struct drm_device *dev); |
@@ -1191,7 +1206,6 @@ int intel_plane_set_property(struct drm_plane *plane, | |||
1191 | struct drm_property *prop, | 1206 | struct drm_property *prop, |
1192 | uint64_t val); | 1207 | uint64_t val); |
1193 | int intel_plane_restore(struct drm_plane *plane); | 1208 | int intel_plane_restore(struct drm_plane *plane); |
1194 | void intel_plane_disable(struct drm_plane *plane); | ||
1195 | int intel_sprite_set_colorkey(struct drm_device *dev, void *data, | 1209 | int intel_sprite_set_colorkey(struct drm_device *dev, void *data, |
1196 | struct drm_file *file_priv); | 1210 | struct drm_file *file_priv); |
1197 | int intel_sprite_get_colorkey(struct drm_device *dev, void *data, | 1211 | int intel_sprite_get_colorkey(struct drm_device *dev, void *data, |
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 0b184079de14..42b6d6f5cecc 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c | |||
@@ -102,11 +102,62 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, | |||
102 | return true; | 102 | return true; |
103 | } | 103 | } |
104 | 104 | ||
105 | static void intel_dsi_port_enable(struct intel_encoder *encoder) | ||
106 | { | ||
107 | struct drm_device *dev = encoder->base.dev; | ||
108 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
109 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); | ||
110 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
111 | enum port port; | ||
112 | u32 temp; | ||
113 | |||
114 | if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { | ||
115 | temp = I915_READ(VLV_CHICKEN_3); | ||
116 | temp &= ~PIXEL_OVERLAP_CNT_MASK; | ||
117 | temp |= intel_dsi->pixel_overlap << | ||
118 | PIXEL_OVERLAP_CNT_SHIFT; | ||
119 | I915_WRITE(VLV_CHICKEN_3, temp); | ||
120 | } | ||
121 | |||
122 | for_each_dsi_port(port, intel_dsi->ports) { | ||
123 | temp = I915_READ(MIPI_PORT_CTRL(port)); | ||
124 | temp &= ~LANE_CONFIGURATION_MASK; | ||
125 | temp &= ~DUAL_LINK_MODE_MASK; | ||
126 | |||
127 | if (intel_dsi->ports == ((1 << PORT_A) | (1 << PORT_C))) { | ||
128 | temp |= (intel_dsi->dual_link - 1) | ||
129 | << DUAL_LINK_MODE_SHIFT; | ||
130 | temp |= intel_crtc->pipe ? | ||
131 | LANE_CONFIGURATION_DUAL_LINK_B : | ||
132 | LANE_CONFIGURATION_DUAL_LINK_A; | ||
133 | } | ||
134 | /* assert ip_tg_enable signal */ | ||
135 | I915_WRITE(MIPI_PORT_CTRL(port), temp | DPI_ENABLE); | ||
136 | POSTING_READ(MIPI_PORT_CTRL(port)); | ||
137 | } | ||
138 | } | ||
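The pixel-overlap write is a read-modify-write of a bit field, and operator precedence makes the one-expression form easy to get wrong: '&=' binds to the entire '~MASK | value << shift' expression, which can never set a field bit that was previously clear. That is why the body above clears the field and ORs in the new value as two statements. A self-contained demonstration of the pitfall:

    #include <assert.h>
    #include <stdint.h>

    #define FIELD_MASK	(0x3f << 10)
    #define FIELD_SHIFT	10

    int main(void)
    {
    	uint32_t reg = 0xffff03ff;	/* field bits (10-15) currently clear */
    	uint32_t v = 5;			/* new field value to program */
    	uint32_t buggy = reg, fixed = reg;

    	/* buggy: '&=' applies to the whole (~MASK | v << SHIFT) expression,
    	 * so it can only clear bits and never sets the new field value */
    	buggy &= ~FIELD_MASK | v << FIELD_SHIFT;

    	/* fixed: clear the field first, then OR in the new value */
    	fixed &= ~FIELD_MASK;
    	fixed |= v << FIELD_SHIFT;

    	assert(buggy == 0xffff03ff);	/* field still zero */
    	assert(fixed == 0xffff17ff);	/* field now holds 5 */
    	return 0;
    }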
139 | |||
140 | static void intel_dsi_port_disable(struct intel_encoder *encoder) | ||
141 | { | ||
142 | struct drm_device *dev = encoder->base.dev; | ||
143 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
144 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
145 | enum port port; | ||
146 | u32 temp; | ||
147 | |||
148 | for_each_dsi_port(port, intel_dsi->ports) { | ||
149 | /* de-assert ip_tg_enable signal */ | ||
150 | temp = I915_READ(MIPI_PORT_CTRL(port)); | ||
151 | I915_WRITE(MIPI_PORT_CTRL(port), temp & ~DPI_ENABLE); | ||
152 | POSTING_READ(MIPI_PORT_CTRL(port)); | ||
153 | } | ||
154 | } | ||
155 | |||
105 | static void intel_dsi_device_ready(struct intel_encoder *encoder) | 156 | static void intel_dsi_device_ready(struct intel_encoder *encoder) |
106 | { | 157 | { |
107 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | 158 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; |
108 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); | 159 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
109 | int pipe = intel_crtc->pipe; | 160 | enum port port; |
110 | u32 val; | 161 | u32 val; |
111 | 162 | ||
112 | DRM_DEBUG_KMS("\n"); | 163 | DRM_DEBUG_KMS("\n"); |
@@ -120,18 +171,26 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder) | |||
120 | /* a band gap reset is needed every time we power gate */ | 171 | /* a band gap reset is needed every time we power gate */ |
121 | band_gap_reset(dev_priv); | 172 | band_gap_reset(dev_priv); |
122 | 173 | ||
123 | I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER); | 174 | for_each_dsi_port(port, intel_dsi->ports) { |
124 | usleep_range(2500, 3000); | 175 | |
176 | I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_ENTER); | ||
177 | usleep_range(2500, 3000); | ||
125 | 178 | ||
126 | val = I915_READ(MIPI_PORT_CTRL(pipe)); | 179 | val = I915_READ(MIPI_PORT_CTRL(port)); |
127 | I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD); | ||
128 | usleep_range(1000, 1500); | ||
129 | 180 | ||
130 | I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT); | 181 | /* Enable MIPI PHY transparent latch |
131 | usleep_range(2500, 3000); | 182 | * Common bit for both MIPI Port A & MIPI Port C |
183 | * (the Port C register has no equivalent bit) | ||
184 | */ | ||
185 | I915_WRITE(MIPI_PORT_CTRL(PORT_A), val | LP_OUTPUT_HOLD); | ||
186 | usleep_range(1000, 1500); | ||
132 | 187 | ||
133 | I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY); | 188 | I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_EXIT); |
134 | usleep_range(2500, 3000); | 189 | usleep_range(2500, 3000); |
190 | |||
191 | I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY); | ||
192 | usleep_range(2500, 3000); | ||
193 | } | ||
135 | } | 194 | } |
136 | 195 | ||
137 | static void intel_dsi_enable(struct intel_encoder *encoder) | 196 | static void intel_dsi_enable(struct intel_encoder *encoder) |
@@ -140,13 +199,12 @@ static void intel_dsi_enable(struct intel_encoder *encoder) | |||
140 | struct drm_i915_private *dev_priv = dev->dev_private; | 199 | struct drm_i915_private *dev_priv = dev->dev_private; |
141 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); | 200 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); |
142 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | 201 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
143 | int pipe = intel_crtc->pipe; | 202 | enum port port = intel_dsi_pipe_to_port(intel_crtc->pipe); |
144 | u32 temp; | ||
145 | 203 | ||
146 | DRM_DEBUG_KMS("\n"); | 204 | DRM_DEBUG_KMS("\n"); |
147 | 205 | ||
148 | if (is_cmd_mode(intel_dsi)) | 206 | if (is_cmd_mode(intel_dsi)) |
149 | I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4); | 207 | I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4); |
150 | else { | 208 | else { |
151 | msleep(20); /* XXX */ | 209 | msleep(20); /* XXX */ |
152 | dpi_send_cmd(intel_dsi, TURN_ON, DPI_LP_MODE_EN); | 210 | dpi_send_cmd(intel_dsi, TURN_ON, DPI_LP_MODE_EN); |
@@ -157,11 +215,7 @@ static void intel_dsi_enable(struct intel_encoder *encoder) | |||
157 | 215 | ||
158 | wait_for_dsi_fifo_empty(intel_dsi); | 216 | wait_for_dsi_fifo_empty(intel_dsi); |
159 | 217 | ||
160 | /* assert ip_tg_enable signal */ | 218 | intel_dsi_port_enable(encoder); |
161 | temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK; | ||
162 | temp = temp | intel_dsi->port_bits; | ||
163 | I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE); | ||
164 | POSTING_READ(MIPI_PORT_CTRL(pipe)); | ||
165 | } | 219 | } |
166 | } | 220 | } |
167 | 221 | ||
@@ -235,9 +289,8 @@ static void intel_dsi_disable(struct intel_encoder *encoder) | |||
235 | { | 289 | { |
236 | struct drm_device *dev = encoder->base.dev; | 290 | struct drm_device *dev = encoder->base.dev; |
237 | struct drm_i915_private *dev_priv = dev->dev_private; | 291 | struct drm_i915_private *dev_priv = dev->dev_private; |
238 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); | ||
239 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | 292 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
240 | int pipe = intel_crtc->pipe; | 293 | enum port port; |
241 | u32 temp; | 294 | u32 temp; |
242 | 295 | ||
243 | DRM_DEBUG_KMS("\n"); | 296 | DRM_DEBUG_KMS("\n"); |
@@ -245,31 +298,28 @@ static void intel_dsi_disable(struct intel_encoder *encoder) | |||
245 | if (is_vid_mode(intel_dsi)) { | 298 | if (is_vid_mode(intel_dsi)) { |
246 | wait_for_dsi_fifo_empty(intel_dsi); | 299 | wait_for_dsi_fifo_empty(intel_dsi); |
247 | 300 | ||
248 | /* de-assert ip_tg_enable signal */ | 301 | intel_dsi_port_disable(encoder); |
249 | temp = I915_READ(MIPI_PORT_CTRL(pipe)); | ||
250 | I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE); | ||
251 | POSTING_READ(MIPI_PORT_CTRL(pipe)); | ||
252 | |||
253 | msleep(2); | 302 | msleep(2); |
254 | } | 303 | } |
255 | 304 | ||
256 | /* Panel commands can be sent when clock is in LP11 */ | 305 | for_each_dsi_port(port, intel_dsi->ports) { |
257 | I915_WRITE(MIPI_DEVICE_READY(pipe), 0x0); | 306 | /* Panel commands can be sent when clock is in LP11 */ |
307 | I915_WRITE(MIPI_DEVICE_READY(port), 0x0); | ||
258 | 308 | ||
259 | temp = I915_READ(MIPI_CTRL(pipe)); | 309 | temp = I915_READ(MIPI_CTRL(port)); |
260 | temp &= ~ESCAPE_CLOCK_DIVIDER_MASK; | 310 | temp &= ~ESCAPE_CLOCK_DIVIDER_MASK; |
261 | I915_WRITE(MIPI_CTRL(pipe), temp | | 311 | I915_WRITE(MIPI_CTRL(port), temp | |
262 | intel_dsi->escape_clk_div << | 312 | intel_dsi->escape_clk_div << |
263 | ESCAPE_CLOCK_DIVIDER_SHIFT); | 313 | ESCAPE_CLOCK_DIVIDER_SHIFT); |
264 | 314 | ||
265 | I915_WRITE(MIPI_EOT_DISABLE(pipe), CLOCKSTOP); | 315 | I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP); |
266 | 316 | ||
267 | temp = I915_READ(MIPI_DSI_FUNC_PRG(pipe)); | 317 | temp = I915_READ(MIPI_DSI_FUNC_PRG(port)); |
268 | temp &= ~VID_MODE_FORMAT_MASK; | 318 | temp &= ~VID_MODE_FORMAT_MASK; |
269 | I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), temp); | 319 | I915_WRITE(MIPI_DSI_FUNC_PRG(port), temp); |
270 | |||
271 | I915_WRITE(MIPI_DEVICE_READY(pipe), 0x1); | ||
272 | 320 | ||
321 | I915_WRITE(MIPI_DEVICE_READY(port), 0x1); | ||
322 | } | ||
273 | /* if disable packets are sent before the shutdown packet, then a turn-on | 323 | /* if disable packets are sent before the shutdown packet, then a turn-on |
274 | * packet error is observed on some subsequent enable sequence */ | 324 | * packet error is observed on some subsequent enable sequence */ |
275 | if (intel_dsi->dev.dev_ops->disable) | 325 | if (intel_dsi->dev.dev_ops->disable) |
@@ -281,31 +331,42 @@ static void intel_dsi_disable(struct intel_encoder *encoder) | |||
281 | static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) | 331 | static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) |
282 | { | 332 | { |
283 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | 333 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; |
284 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); | 334 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); |
285 | int pipe = intel_crtc->pipe; | 335 | enum port port; |
286 | u32 val; | 336 | u32 val; |
287 | 337 | ||
288 | DRM_DEBUG_KMS("\n"); | 338 | DRM_DEBUG_KMS("\n"); |
289 | 339 | for_each_dsi_port(port, intel_dsi->ports) { | |
290 | I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER); | 340 | |
291 | usleep_range(2000, 2500); | 341 | I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY | |
292 | 342 | ULPS_STATE_ENTER); | |
293 | I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT); | 343 | usleep_range(2000, 2500); |
294 | usleep_range(2000, 2500); | 344 | |
295 | 345 | I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY | | |
296 | I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER); | 346 | ULPS_STATE_EXIT); |
297 | usleep_range(2000, 2500); | 347 | usleep_range(2000, 2500); |
298 | 348 | ||
299 | if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT) | 349 | I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY | |
300 | == 0x00000), 30)) | 350 | ULPS_STATE_ENTER); |
301 | DRM_ERROR("DSI LP not going Low\n"); | 351 | usleep_range(2000, 2500); |
302 | 352 | ||
303 | val = I915_READ(MIPI_PORT_CTRL(pipe)); | 353 | /* Wait till Clock lanes are in LP-00 state for MIPI Port A |
304 | I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD); | 354 | * only. MIPI Port C has no similar bit for checking |
305 | usleep_range(1000, 1500); | 355 | */ |
306 | 356 | if (wait_for(((I915_READ(MIPI_PORT_CTRL(PORT_A)) & AFE_LATCHOUT) | |
307 | I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00); | 357 | == 0x00000), 30)) |
308 | usleep_range(2000, 2500); | 358 | DRM_ERROR("DSI LP not going Low\n"); |
359 | |||
360 | val = I915_READ(MIPI_PORT_CTRL(port)); | ||
361 | /* Disable MIPI PHY transparent latch | ||
362 | * Common bit for both MIPI Port A & MIPI Port C | ||
363 | */ | ||
364 | I915_WRITE(MIPI_PORT_CTRL(PORT_A), val & ~LP_OUTPUT_HOLD); | ||
365 | usleep_range(1000, 1500); | ||
366 | |||
367 | I915_WRITE(MIPI_DEVICE_READY(port), 0x00); | ||
368 | usleep_range(2000, 2500); | ||
369 | } | ||
309 | 370 | ||
310 | vlv_disable_dsi_pll(encoder); | 371 | vlv_disable_dsi_pll(encoder); |
311 | } | 372 | } |
@@ -337,9 +398,11 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, | |||
337 | enum pipe *pipe) | 398 | enum pipe *pipe) |
338 | { | 399 | { |
339 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | 400 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; |
401 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
402 | struct drm_device *dev = encoder->base.dev; | ||
340 | enum intel_display_power_domain power_domain; | 403 | enum intel_display_power_domain power_domain; |
341 | u32 port, func; | 404 | u32 dpi_enabled, func; |
342 | enum pipe p; | 405 | enum port port; |
343 | 406 | ||
344 | DRM_DEBUG_KMS("\n"); | 407 | DRM_DEBUG_KMS("\n"); |
345 | 408 | ||
@@ -348,13 +411,23 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, | |||
348 | return false; | 411 | return false; |
349 | 412 | ||
350 | /* XXX: this only works for one DSI output */ | 413 | /* XXX: this only works for one DSI output */ |
351 | for (p = PIPE_A; p <= PIPE_B; p++) { | 414 | for_each_dsi_port(port, intel_dsi->ports) { |
352 | port = I915_READ(MIPI_PORT_CTRL(p)); | 415 | func = I915_READ(MIPI_DSI_FUNC_PRG(port)); |
353 | func = I915_READ(MIPI_DSI_FUNC_PRG(p)); | 416 | dpi_enabled = I915_READ(MIPI_PORT_CTRL(port)) & |
354 | 417 | DPI_ENABLE; | |
355 | if ((port & DPI_ENABLE) || (func & CMD_MODE_DATA_WIDTH_MASK)) { | 418 | |
356 | if (I915_READ(MIPI_DEVICE_READY(p)) & DEVICE_READY) { | 419 | /* Due to some hardware limitations on BYT, MIPI Port C DPI |
357 | *pipe = p; | 420 | * Enable bit does not get set. To check whether DSI Port C |
421 | * was enabled in BIOS, check the Pipe B enable bit | ||
422 | */ | ||
423 | if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && | ||
424 | (port == PORT_C)) | ||
425 | dpi_enabled = I915_READ(PIPECONF(PIPE_B)) & | ||
426 | PIPECONF_ENABLE; | ||
427 | |||
428 | if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) { | ||
429 | if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) { | ||
430 | *pipe = port == PORT_A ? PIPE_A : PIPE_B; | ||
358 | return true; | 431 | return true; |
359 | } | 432 | } |
360 | } | 433 | } |
@@ -437,7 +510,7 @@ static void set_dsi_timings(struct drm_encoder *encoder, | |||
437 | struct drm_i915_private *dev_priv = dev->dev_private; | 510 | struct drm_i915_private *dev_priv = dev->dev_private; |
438 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 511 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
439 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); | 512 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); |
440 | int pipe = intel_crtc->pipe; | 513 | enum port port; |
441 | unsigned int bpp = intel_crtc->config.pipe_bpp; | 514 | unsigned int bpp = intel_crtc->config.pipe_bpp; |
442 | unsigned int lane_count = intel_dsi->lane_count; | 515 | unsigned int lane_count = intel_dsi->lane_count; |
443 | 516 | ||
@@ -448,6 +521,15 @@ static void set_dsi_timings(struct drm_encoder *encoder, | |||
448 | hsync = mode->hsync_end - mode->hsync_start; | 521 | hsync = mode->hsync_end - mode->hsync_start; |
449 | hbp = mode->htotal - mode->hsync_end; | 522 | hbp = mode->htotal - mode->hsync_end; |
450 | 523 | ||
524 | if (intel_dsi->dual_link) { | ||
525 | hactive /= 2; | ||
526 | if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) | ||
527 | hactive += intel_dsi->pixel_overlap; | ||
528 | hfp /= 2; | ||
529 | hsync /= 2; | ||
530 | hbp /= 2; | ||
531 | } | ||
532 | |||
451 | vfp = mode->vsync_start - mode->vdisplay; | 533 | vfp = mode->vsync_start - mode->vdisplay; |
452 | vsync = mode->vsync_end - mode->vsync_start; | 534 | vsync = mode->vsync_end - mode->vsync_start; |
453 | vbp = mode->vtotal - mode->vsync_end; | 535 | vbp = mode->vtotal - mode->vsync_end; |
@@ -460,18 +542,20 @@ static void set_dsi_timings(struct drm_encoder *encoder, | |||
460 | intel_dsi->burst_mode_ratio); | 542 | intel_dsi->burst_mode_ratio); |
461 | hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio); | 543 | hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio); |
462 | 544 | ||
463 | I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive); | 545 | for_each_dsi_port(port, intel_dsi->ports) { |
464 | I915_WRITE(MIPI_HFP_COUNT(pipe), hfp); | 546 | I915_WRITE(MIPI_HACTIVE_AREA_COUNT(port), hactive); |
547 | I915_WRITE(MIPI_HFP_COUNT(port), hfp); | ||
465 | 548 | ||
466 | /* meaningful for video mode non-burst sync pulse mode only, can be zero | 549 | /* meaningful for video mode non-burst sync pulse mode only, |
467 | * for non-burst sync events and burst modes */ | 550 | * can be zero for non-burst sync events and burst modes */ |
468 | I915_WRITE(MIPI_HSYNC_PADDING_COUNT(pipe), hsync); | 551 | I915_WRITE(MIPI_HSYNC_PADDING_COUNT(port), hsync); |
469 | I915_WRITE(MIPI_HBP_COUNT(pipe), hbp); | 552 | I915_WRITE(MIPI_HBP_COUNT(port), hbp); |
470 | 553 | ||
471 | /* vertical values are in terms of lines */ | 554 | /* vertical values are in terms of lines */ |
472 | I915_WRITE(MIPI_VFP_COUNT(pipe), vfp); | 555 | I915_WRITE(MIPI_VFP_COUNT(port), vfp); |
473 | I915_WRITE(MIPI_VSYNC_PADDING_COUNT(pipe), vsync); | 556 | I915_WRITE(MIPI_VSYNC_PADDING_COUNT(port), vsync); |
474 | I915_WRITE(MIPI_VBP_COUNT(pipe), vbp); | 557 | I915_WRITE(MIPI_VBP_COUNT(port), vbp); |
558 | } | ||
475 | } | 559 | } |
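In dual link mode each MIPI port transmits half of every horizontal timing, and front-back mode widens each half by pixel_overlap columns so the two halves can be stitched together. A worked example with hypothetical panel numbers (1920 active pixels, overlap of 4, so each port is programmed for 964):

    #include <stdio.h>

    int main(void)
    {
    	/* hypothetical panel: 1920 active pixels, overlap of 4 columns */
    	unsigned int hactive = 1920, hfp = 32, hsync = 16, hbp = 64;
    	unsigned int pixel_overlap = 4;	/* DSI_DUAL_LINK_FRONT_BACK only */

    	hactive = hactive / 2 + pixel_overlap;	/* 964 pixels per port */
    	hfp /= 2;
    	hsync /= 2;
    	hbp /= 2;

    	printf("per port: hactive=%u hfp=%u hsync=%u hbp=%u\n",
    	       hactive, hfp, hsync, hbp);
    	return 0;
    }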
476 | 560 | ||
477 | static void intel_dsi_prepare(struct intel_encoder *intel_encoder) | 561 | static void intel_dsi_prepare(struct intel_encoder *intel_encoder) |
@@ -483,32 +567,43 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder) | |||
483 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); | 567 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); |
484 | struct drm_display_mode *adjusted_mode = | 568 | struct drm_display_mode *adjusted_mode = |
485 | &intel_crtc->config.adjusted_mode; | 569 | &intel_crtc->config.adjusted_mode; |
486 | int pipe = intel_crtc->pipe; | 570 | enum port port; |
487 | unsigned int bpp = intel_crtc->config.pipe_bpp; | 571 | unsigned int bpp = intel_crtc->config.pipe_bpp; |
488 | u32 val, tmp; | 572 | u32 val, tmp; |
573 | u16 mode_hdisplay; | ||
489 | 574 | ||
490 | DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); | 575 | DRM_DEBUG_KMS("pipe %c\n", pipe_name(intel_crtc->pipe)); |
491 | 576 | ||
492 | /* escape clock divider, 20MHz, shared for A and C. device ready must be | 577 | mode_hdisplay = adjusted_mode->hdisplay; |
493 | * off when doing this! txclkesc? */ | 578 | |
494 | tmp = I915_READ(MIPI_CTRL(0)); | 579 | if (intel_dsi->dual_link) { |
495 | tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK; | 580 | mode_hdisplay /= 2; |
496 | I915_WRITE(MIPI_CTRL(0), tmp | ESCAPE_CLOCK_DIVIDER_1); | 581 | if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) |
582 | mode_hdisplay += intel_dsi->pixel_overlap; | ||
583 | } | ||
497 | 584 | ||
498 | /* read request priority is per pipe */ | 585 | for_each_dsi_port(port, intel_dsi->ports) { |
499 | tmp = I915_READ(MIPI_CTRL(pipe)); | 586 | /* escape clock divider, 20MHz, shared for A and C. |
500 | tmp &= ~READ_REQUEST_PRIORITY_MASK; | 587 | * device ready must be off when doing this! txclkesc? */ |
501 | I915_WRITE(MIPI_CTRL(pipe), tmp | READ_REQUEST_PRIORITY_HIGH); | 588 | tmp = I915_READ(MIPI_CTRL(PORT_A)); |
589 | tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK; | ||
590 | I915_WRITE(MIPI_CTRL(PORT_A), tmp | ESCAPE_CLOCK_DIVIDER_1); | ||
502 | 591 | ||
503 | /* XXX: why here, why like this? handling in irq handler?! */ | 592 | /* read request priority is per pipe */ |
504 | I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff); | 593 | tmp = I915_READ(MIPI_CTRL(port)); |
505 | I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff); | 594 | tmp &= ~READ_REQUEST_PRIORITY_MASK; |
595 | I915_WRITE(MIPI_CTRL(port), tmp | READ_REQUEST_PRIORITY_HIGH); | ||
506 | 596 | ||
507 | I915_WRITE(MIPI_DPHY_PARAM(pipe), intel_dsi->dphy_reg); | 597 | /* XXX: why here, why like this? handling in irq handler?! */ |
598 | I915_WRITE(MIPI_INTR_STAT(port), 0xffffffff); | ||
599 | I915_WRITE(MIPI_INTR_EN(port), 0xffffffff); | ||
508 | 600 | ||
509 | I915_WRITE(MIPI_DPI_RESOLUTION(pipe), | 601 | I915_WRITE(MIPI_DPHY_PARAM(port), intel_dsi->dphy_reg); |
510 | adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT | | 602 | |
511 | adjusted_mode->hdisplay << HORIZONTAL_ADDRESS_SHIFT); | 603 | I915_WRITE(MIPI_DPI_RESOLUTION(port), |
604 | adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT | | ||
605 | mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT); | ||
606 | } | ||
512 | 607 | ||
513 | set_dsi_timings(encoder, adjusted_mode); | 608 | set_dsi_timings(encoder, adjusted_mode); |
514 | 609 | ||
@@ -522,95 +617,102 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder) | |||
522 | /* XXX: cross-check bpp vs. pixel format? */ | 617 | /* XXX: cross-check bpp vs. pixel format? */ |
523 | val |= intel_dsi->pixel_format; | 618 | val |= intel_dsi->pixel_format; |
524 | } | 619 | } |
525 | I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val); | ||
526 | |||
527 | /* timeouts for recovery. one frame IIUC. if counter expires, EOT and | ||
528 | * stop state. */ | ||
529 | |||
530 | /* | ||
531 | * In burst mode, value greater than one DPI line Time in byte clock | ||
532 | * (txbyteclkhs) To timeout this timer 1+ of the above said value is | ||
533 | * recommended. | ||
534 | * | ||
535 | * In non-burst mode, Value greater than one DPI frame time in byte | ||
536 | * clock(txbyteclkhs) To timeout this timer 1+ of the above said value | ||
537 | * is recommended. | ||
538 | * | ||
539 | * In DBI only mode, value greater than one DBI frame time in byte | ||
540 | * clock(txbyteclkhs) To timeout this timer 1+ of the above said value | ||
541 | * is recommended. | ||
542 | */ | ||
543 | |||
544 | if (is_vid_mode(intel_dsi) && | ||
545 | intel_dsi->video_mode_format == VIDEO_MODE_BURST) { | ||
546 | I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe), | ||
547 | txbyteclkhs(adjusted_mode->htotal, bpp, | ||
548 | intel_dsi->lane_count, | ||
549 | intel_dsi->burst_mode_ratio) + 1); | ||
550 | } else { | ||
551 | I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe), | ||
552 | txbyteclkhs(adjusted_mode->vtotal * | ||
553 | adjusted_mode->htotal, | ||
554 | bpp, intel_dsi->lane_count, | ||
555 | intel_dsi->burst_mode_ratio) + 1); | ||
556 | } | ||
557 | I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout); | ||
558 | I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val); | ||
559 | I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), intel_dsi->rst_timer_val); | ||
560 | 620 | ||
561 | /* dphy stuff */ | 621 | tmp = 0; |
562 | |||
563 | /* in terms of low power clock */ | ||
564 | I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(intel_dsi->escape_clk_div, 100)); | ||
565 | |||
566 | val = 0; | ||
567 | if (intel_dsi->eotp_pkt == 0) | 622 | if (intel_dsi->eotp_pkt == 0) |
568 | val |= EOT_DISABLE; | 623 | tmp |= EOT_DISABLE; |
569 | |||
570 | if (intel_dsi->clock_stop) | 624 | if (intel_dsi->clock_stop) |
571 | val |= CLOCKSTOP; | 625 | tmp |= CLOCKSTOP; |
572 | 626 | ||
573 | /* recovery disables */ | 627 | for_each_dsi_port(port, intel_dsi->ports) { |
574 | I915_WRITE(MIPI_EOT_DISABLE(pipe), val); | 628 | I915_WRITE(MIPI_DSI_FUNC_PRG(port), val); |
575 | 629 | ||
576 | /* in terms of low power clock */ | 630 | /* timeouts for recovery. one frame IIUC. if counter expires, |
577 | I915_WRITE(MIPI_INIT_COUNT(pipe), intel_dsi->init_count); | 631 | * EOT and stop state. */ |
578 | 632 | ||
579 | /* in terms of txbyteclkhs. actual high to low switch + | 633 | /* |
580 | * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK. | 634 | * In burst mode, value greater than one DPI line Time in byte |
581 | * | 635 | * clock (txbyteclkhs) To timeout this timer 1+ of the above |
582 | * XXX: write MIPI_STOP_STATE_STALL? | 636 | * said value is recommended. |
583 | */ | 637 | * |
584 | I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), | 638 | * In non-burst mode, Value greater than one DPI frame time in |
585 | intel_dsi->hs_to_lp_count); | 639 | * byte clock(txbyteclkhs) To timeout this timer 1+ of the above |
586 | 640 | * said value is recommended. | |
587 | /* XXX: low power clock equivalence in terms of byte clock. the number | 641 | * |
588 | * of byte clocks occupied in one low power clock. based on txbyteclkhs | 642 | * In DBI only mode, value greater than one DBI frame time in |
589 | * and txclkesc. txclkesc time / txbyteclk time * (105 + | 643 | * byte clock(txbyteclkhs) To timeout this timer 1+ of the above |
590 | * MIPI_STOP_STATE_STALL) / 105.??? | 644 | * said value is recommended. |
591 | */ | 645 | */ |
592 | I915_WRITE(MIPI_LP_BYTECLK(pipe), intel_dsi->lp_byte_clk); | 646 | |
593 | 647 | if (is_vid_mode(intel_dsi) && | |
594 | /* the bw essential for transmitting 16 long packets containing 252 | 648 | intel_dsi->video_mode_format == VIDEO_MODE_BURST) { |
595 | * bytes meant for dcs write memory command is programmed in this | 649 | I915_WRITE(MIPI_HS_TX_TIMEOUT(port), |
596 | * register in terms of byte clocks. based on dsi transfer rate and the | 650 | txbyteclkhs(adjusted_mode->htotal, bpp, |
597 | * number of lanes configured the time taken to transmit 16 long packets | 651 | intel_dsi->lane_count, |
598 | * in a dsi stream varies. */ | 652 | intel_dsi->burst_mode_ratio) + 1); |
599 | I915_WRITE(MIPI_DBI_BW_CTRL(pipe), intel_dsi->bw_timer); | 653 | } else { |
600 | 654 | I915_WRITE(MIPI_HS_TX_TIMEOUT(port), | |
601 | I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe), | 655 | txbyteclkhs(adjusted_mode->vtotal * |
602 | intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT | | 656 | adjusted_mode->htotal, |
603 | intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT); | 657 | bpp, intel_dsi->lane_count, |
604 | 658 | intel_dsi->burst_mode_ratio) + 1); | |
605 | if (is_vid_mode(intel_dsi)) | 659 | } |
606 | /* Some panels might have resolution which is not a multiple of | 660 | I915_WRITE(MIPI_LP_RX_TIMEOUT(port), intel_dsi->lp_rx_timeout); |
607 | * 64 like 1366 x 768. Enable RANDOM resolution support for such | 661 | I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(port), |
608 | * panels by default */ | 662 | intel_dsi->turn_arnd_val); |
609 | I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe), | 663 | I915_WRITE(MIPI_DEVICE_RESET_TIMER(port), |
610 | intel_dsi->video_frmt_cfg_bits | | 664 | intel_dsi->rst_timer_val); |
611 | intel_dsi->video_mode_format | | 665 | |
612 | IP_TG_CONFIG | | 666 | /* dphy stuff */ |
613 | RANDOM_DPI_DISPLAY_RESOLUTION); | 667 | |
668 | /* in terms of low power clock */ | ||
669 | I915_WRITE(MIPI_INIT_COUNT(port), | ||
670 | txclkesc(intel_dsi->escape_clk_div, 100)); | ||
671 | |||
672 | |||
673 | /* recovery disables */ | ||
674 | I915_WRITE(MIPI_EOT_DISABLE(port), tmp); | ||
675 | |||
676 | /* in terms of low power clock */ | ||
677 | I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count); | ||
678 | |||
679 | /* in terms of txbyteclkhs. actual high to low switch + | ||
680 | * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK. | ||
681 | * | ||
682 | * XXX: write MIPI_STOP_STATE_STALL? | ||
683 | */ | ||
684 | I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(port), | ||
685 | intel_dsi->hs_to_lp_count); | ||
686 | |||
687 | /* XXX: low power clock equivalence in terms of byte clock. | ||
688 | * the number of byte clocks occupied in one low power clock. | ||
689 | * based on txbyteclkhs and txclkesc. | ||
690 | * txclkesc time / txbyteclk time * (105 + MIPI_STOP_STATE_STALL | ||
691 | * ) / 105.??? | ||
692 | */ | ||
693 | I915_WRITE(MIPI_LP_BYTECLK(port), intel_dsi->lp_byte_clk); | ||
694 | |||
695 | /* the bw essential for transmitting 16 long packets containing | ||
696 | * 252 bytes meant for dcs write memory command is programmed in | ||
697 | * this register in terms of byte clocks. based on dsi transfer | ||
698 | * rate and the number of lanes configured the time taken to | ||
699 | * transmit 16 long packets in a dsi stream varies. */ | ||
700 | I915_WRITE(MIPI_DBI_BW_CTRL(port), intel_dsi->bw_timer); | ||
701 | |||
702 | I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(port), | ||
703 | intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT | | ||
704 | intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT); | ||
705 | |||
706 | if (is_vid_mode(intel_dsi)) | ||
707 | /* Some panels might have resolution which is not a | ||
708 | * multiple of 64 like 1366 x 768. Enable RANDOM | ||
709 | * resolution support for such panels by default */ | ||
710 | I915_WRITE(MIPI_VIDEO_MODE_FORMAT(port), | ||
711 | intel_dsi->video_frmt_cfg_bits | | ||
712 | intel_dsi->video_mode_format | | ||
713 | IP_TG_CONFIG | | ||
714 | RANDOM_DPI_DISPLAY_RESOLUTION); | ||
715 | } | ||
614 | } | 716 | } |
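The HS TX timeout programmed above is one DPI line (burst mode) or one full frame (otherwise) converted to byte clocks, plus one. Assuming txbyteclkhs() in intel_dsi.h follows the usual shape - pixels times bpp, scaled by the burst ratio (stored as a percentage), rounded up to bytes and divided across the lanes - the burst-mode arithmetic for a hypothetical 1100-pixel htotal at 24 bpp on 4 lanes works out as follows:

    #include <stdio.h>

    /* Assumed shape of txbyteclkhs(): pixel clocks -> byte clocks across
     * the lanes, scaled by the burst mode ratio (a percentage). */
    static unsigned int txbyteclkhs(unsigned int pixels, unsigned int bpp,
    				unsigned int lane_count,
    				unsigned int burst_mode_ratio)
    {
    	unsigned int bytes = (pixels * bpp * burst_mode_ratio + 8 * 100 - 1) /
    			     (8 * 100);
    	return (bytes + lane_count - 1) / lane_count;
    }

    int main(void)
    {
    	/* hypothetical: htotal 1100, 24 bpp, 4 lanes, ratio 100 (no burst) */
    	printf("MIPI_HS_TX_TIMEOUT = %u\n", txbyteclkhs(1100, 24, 4, 100) + 1);
    	return 0;	/* prints 826: one DPI line in byte clocks, plus one */
    }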
615 | 717 | ||
616 | static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder) | 718 | static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder) |
@@ -748,6 +850,15 @@ void intel_dsi_init(struct drm_device *dev) | |||
748 | intel_connector->get_hw_state = intel_connector_get_hw_state; | 850 | intel_connector->get_hw_state = intel_connector_get_hw_state; |
749 | intel_connector->unregister = intel_connector_unregister; | 851 | intel_connector->unregister = intel_connector_unregister; |
750 | 852 | ||
853 | /* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */ | ||
854 | if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) { | ||
855 | intel_encoder->crtc_mask = (1 << PIPE_A); | ||
856 | intel_dsi->ports = (1 << PORT_A); | ||
857 | } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) { | ||
858 | intel_encoder->crtc_mask = (1 << PIPE_B); | ||
859 | intel_dsi->ports = (1 << PORT_C); | ||
860 | } | ||
861 | |||
751 | for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) { | 862 | for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) { |
752 | dsi = &intel_dsi_devices[i]; | 863 | dsi = &intel_dsi_devices[i]; |
753 | intel_dsi->dev = *dsi; | 864 | intel_dsi->dev = *dsi; |
@@ -762,8 +873,6 @@ void intel_dsi_init(struct drm_device *dev) | |||
762 | } | 873 | } |
763 | 874 | ||
764 | intel_encoder->type = INTEL_OUTPUT_DSI; | 875 | intel_encoder->type = INTEL_OUTPUT_DSI; |
765 | intel_encoder->crtc_mask = (1 << 0); /* XXX */ | ||
766 | |||
767 | intel_encoder->cloneable = 0; | 876 | intel_encoder->cloneable = 0; |
768 | drm_connector_init(dev, connector, &intel_dsi_connector_funcs, | 877 | drm_connector_init(dev, connector, &intel_dsi_connector_funcs, |
769 | DRM_MODE_CONNECTOR_DSI); | 878 | DRM_MODE_CONNECTOR_DSI); |
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index 657eb5c1b9d8..8fe2064dd804 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h | |||
@@ -28,6 +28,11 @@ | |||
28 | #include <drm/drm_crtc.h> | 28 | #include <drm/drm_crtc.h> |
29 | #include "intel_drv.h" | 29 | #include "intel_drv.h" |
30 | 30 | ||
31 | /* Dual Link support */ | ||
32 | #define DSI_DUAL_LINK_NONE 0 | ||
33 | #define DSI_DUAL_LINK_FRONT_BACK 1 | ||
34 | #define DSI_DUAL_LINK_PIXEL_ALT 2 | ||
35 | |||
31 | struct intel_dsi_device { | 36 | struct intel_dsi_device { |
32 | unsigned int panel_id; | 37 | unsigned int panel_id; |
33 | const char *name; | 38 | const char *name; |
@@ -78,6 +83,9 @@ struct intel_dsi { | |||
78 | 83 | ||
79 | struct intel_connector *attached_connector; | 84 | struct intel_connector *attached_connector; |
80 | 85 | ||
86 | /* bit mask of ports being driven */ | ||
87 | u16 ports; | ||
88 | |||
81 | /* if true, use HS mode, otherwise LP */ | 89 | /* if true, use HS mode, otherwise LP */ |
82 | bool hs; | 90 | bool hs; |
83 | 91 | ||
@@ -101,6 +109,8 @@ struct intel_dsi { | |||
101 | u8 clock_stop; | 109 | u8 clock_stop; |
102 | 110 | ||
103 | u8 escape_clk_div; | 111 | u8 escape_clk_div; |
112 | u8 dual_link; | ||
113 | u8 pixel_overlap; | ||
104 | u32 port_bits; | 114 | u32 port_bits; |
105 | u32 bw_timer; | 115 | u32 bw_timer; |
106 | u32 dphy_reg; | 116 | u32 dphy_reg; |
@@ -127,6 +137,22 @@ struct intel_dsi { | |||
127 | u16 panel_pwr_cycle_delay; | 137 | u16 panel_pwr_cycle_delay; |
128 | }; | 138 | }; |
129 | 139 | ||
140 | /* XXX: Transitional before dual port configuration */ | ||
141 | static inline enum port intel_dsi_pipe_to_port(enum pipe pipe) | ||
142 | { | ||
143 | if (pipe == PIPE_A) | ||
144 | return PORT_A; | ||
145 | else if (pipe == PIPE_B) | ||
146 | return PORT_C; | ||
147 | |||
148 | WARN(1, "DSI on pipe %c, assuming port C\n", pipe_name(pipe)); | ||
149 | return PORT_C; | ||
150 | } | ||
151 | |||
152 | #define for_each_dsi_port(__port, __ports_mask) \ | ||
153 | for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \ | ||
154 | if ((__ports_mask) & (1 << (__port))) | ||
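for_each_dsi_port() is a filtered iterator: it walks every enum port value and executes the body only for ports whose bit is set in the mask. A standalone illustration (the enum here is a stand-in for the real one in i915_drv.h):

    #include <stdio.h>

    enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, I915_MAX_PORTS };

    #define for_each_dsi_port(__port, __ports_mask) \
    	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
    		if ((__ports_mask) & (1 << (__port)))

    int main(void)
    {
    	enum port port;
    	unsigned short ports = (1 << PORT_A) | (1 << PORT_C);	/* dual link */

    	for_each_dsi_port(port, ports)
    		printf("programming DSI port %c\n", 'A' + port);
    	return 0;	/* visits A and C, skips B, D and E */
    }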
155 | |||
130 | static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) | 156 | static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) |
131 | { | 157 | { |
132 | return container_of(encoder, struct intel_dsi, base.base); | 158 | return container_of(encoder, struct intel_dsi, base.base); |
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c index f4767fd2ebeb..562811c1a9d2 100644 --- a/drivers/gpu/drm/i915/intel_dsi_cmd.c +++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c | |||
@@ -48,21 +48,19 @@ | |||
48 | * For memory writes, these should probably be used for performance. | 48 | * For memory writes, these should probably be used for performance. |
49 | */ | 49 | */ |
50 | 50 | ||
51 | static void print_stat(struct intel_dsi *intel_dsi) | 51 | static void print_stat(struct intel_dsi *intel_dsi, enum port port) |
52 | { | 52 | { |
53 | struct drm_encoder *encoder = &intel_dsi->base.base; | 53 | struct drm_encoder *encoder = &intel_dsi->base.base; |
54 | struct drm_device *dev = encoder->dev; | 54 | struct drm_device *dev = encoder->dev; |
55 | struct drm_i915_private *dev_priv = dev->dev_private; | 55 | struct drm_i915_private *dev_priv = dev->dev_private; |
56 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||
57 | enum pipe pipe = intel_crtc->pipe; | ||
58 | u32 val; | 56 | u32 val; |
59 | 57 | ||
60 | val = I915_READ(MIPI_INTR_STAT(pipe)); | 58 | val = I915_READ(MIPI_INTR_STAT(port)); |
61 | 59 | ||
62 | #define STAT_BIT(val, bit) (val) & (bit) ? " " #bit : "" | 60 | #define STAT_BIT(val, bit) (val) & (bit) ? " " #bit : "" |
63 | DRM_DEBUG_KMS("MIPI_INTR_STAT(%d) = %08x" | 61 | DRM_DEBUG_KMS("MIPI_INTR_STAT(%c) = %08x" |
64 | "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s" | 62 | "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s" |
65 | "\n", pipe, val, | 63 | "\n", port_name(port), val, |
66 | STAT_BIT(val, TEARING_EFFECT), | 64 | STAT_BIT(val, TEARING_EFFECT), |
67 | STAT_BIT(val, SPL_PKT_SENT_INTERRUPT), | 65 | STAT_BIT(val, SPL_PKT_SENT_INTERRUPT), |
68 | STAT_BIT(val, GEN_READ_DATA_AVAIL), | 66 | STAT_BIT(val, GEN_READ_DATA_AVAIL), |
@@ -104,34 +102,31 @@ enum dsi_type { | |||
104 | }; | 102 | }; |
105 | 103 | ||
106 | /* enable or disable command mode hs transmissions */ | 104 | /* enable or disable command mode hs transmissions */ |
107 | void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable) | 105 | void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable, |
106 | enum port port) | ||
108 | { | 107 | { |
109 | struct drm_encoder *encoder = &intel_dsi->base.base; | 108 | struct drm_encoder *encoder = &intel_dsi->base.base; |
110 | struct drm_device *dev = encoder->dev; | 109 | struct drm_device *dev = encoder->dev; |
111 | struct drm_i915_private *dev_priv = dev->dev_private; | 110 | struct drm_i915_private *dev_priv = dev->dev_private; |
112 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||
113 | enum pipe pipe = intel_crtc->pipe; | ||
114 | u32 temp; | 111 | u32 temp; |
115 | u32 mask = DBI_FIFO_EMPTY; | 112 | u32 mask = DBI_FIFO_EMPTY; |
116 | 113 | ||
117 | if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50)) | 114 | if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 50)) |
118 | DRM_ERROR("Timeout waiting for DBI FIFO empty\n"); | 115 | DRM_ERROR("Timeout waiting for DBI FIFO empty\n"); |
119 | 116 | ||
120 | temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(pipe)); | 117 | temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(port)); |
121 | temp &= DBI_HS_LP_MODE_MASK; | 118 | temp &= DBI_HS_LP_MODE_MASK; |
122 | I915_WRITE(MIPI_HS_LP_DBI_ENABLE(pipe), enable ? DBI_HS_MODE : DBI_LP_MODE); | 119 | I915_WRITE(MIPI_HS_LP_DBI_ENABLE(port), enable ? DBI_HS_MODE : DBI_LP_MODE); |
123 | 120 | ||
124 | intel_dsi->hs = enable; | 121 | intel_dsi->hs = enable; |
125 | } | 122 | } |
126 | 123 | ||
127 | static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel, | 124 | static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel, |
128 | u8 data_type, u16 data) | 125 | u8 data_type, u16 data, enum port port) |
129 | { | 126 | { |
130 | struct drm_encoder *encoder = &intel_dsi->base.base; | 127 | struct drm_encoder *encoder = &intel_dsi->base.base; |
131 | struct drm_device *dev = encoder->dev; | 128 | struct drm_device *dev = encoder->dev; |
132 | struct drm_i915_private *dev_priv = dev->dev_private; | 129 | struct drm_i915_private *dev_priv = dev->dev_private; |
133 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||
134 | enum pipe pipe = intel_crtc->pipe; | ||
135 | u32 ctrl_reg; | 130 | u32 ctrl_reg; |
136 | u32 ctrl; | 131 | u32 ctrl; |
137 | u32 mask; | 132 | u32 mask; |
@@ -140,16 +135,16 @@ static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel, | |||
140 | channel, data_type, data); | 135 | channel, data_type, data); |
141 | 136 | ||
142 | if (intel_dsi->hs) { | 137 | if (intel_dsi->hs) { |
143 | ctrl_reg = MIPI_HS_GEN_CTRL(pipe); | 138 | ctrl_reg = MIPI_HS_GEN_CTRL(port); |
144 | mask = HS_CTRL_FIFO_FULL; | 139 | mask = HS_CTRL_FIFO_FULL; |
145 | } else { | 140 | } else { |
146 | ctrl_reg = MIPI_LP_GEN_CTRL(pipe); | 141 | ctrl_reg = MIPI_LP_GEN_CTRL(port); |
147 | mask = LP_CTRL_FIFO_FULL; | 142 | mask = LP_CTRL_FIFO_FULL; |
148 | } | 143 | } |
149 | 144 | ||
150 | if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50)) { | 145 | if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == 0, 50)) { |
151 | DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n"); | 146 | DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n"); |
152 | print_stat(intel_dsi); | 147 | print_stat(intel_dsi, port); |
153 | } | 148 | } |
154 | 149 | ||
155 | /* | 150 | /* |
@@ -167,13 +162,11 @@ static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel, | |||
167 | } | 162 | } |
168 | 163 | ||
169 | static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel, | 164 | static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel, |
170 | u8 data_type, const u8 *data, int len) | 165 | u8 data_type, const u8 *data, int len, enum port port) |
171 | { | 166 | { |
172 | struct drm_encoder *encoder = &intel_dsi->base.base; | 167 | struct drm_encoder *encoder = &intel_dsi->base.base; |
173 | struct drm_device *dev = encoder->dev; | 168 | struct drm_device *dev = encoder->dev; |
174 | struct drm_i915_private *dev_priv = dev->dev_private; | 169 | struct drm_i915_private *dev_priv = dev->dev_private; |
175 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||
176 | enum pipe pipe = intel_crtc->pipe; | ||
177 | u32 data_reg; | 170 | u32 data_reg; |
178 | int i, j, n; | 171 | int i, j, n; |
179 | u32 mask; | 172 | u32 mask; |
@@ -182,14 +175,14 @@ static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel, | |||
182 | channel, data_type, len); | 175 | channel, data_type, len); |
183 | 176 | ||
184 | if (intel_dsi->hs) { | 177 | if (intel_dsi->hs) { |
185 | data_reg = MIPI_HS_GEN_DATA(pipe); | 178 | data_reg = MIPI_HS_GEN_DATA(port); |
186 | mask = HS_DATA_FIFO_FULL; | 179 | mask = HS_DATA_FIFO_FULL; |
187 | } else { | 180 | } else { |
188 | data_reg = MIPI_LP_GEN_DATA(pipe); | 181 | data_reg = MIPI_LP_GEN_DATA(port); |
189 | mask = LP_DATA_FIFO_FULL; | 182 | mask = LP_DATA_FIFO_FULL; |
190 | } | 183 | } |
191 | 184 | ||
192 | if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50)) | 185 | if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == 0, 50)) |
193 | DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n"); | 186 | DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n"); |
194 | 187 | ||
195 | for (i = 0; i < len; i += n) { | 188 | for (i = 0; i < len; i += n) { |
@@ -204,12 +197,12 @@ static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel, | |||
204 | * dwords, then wait for not set, then continue. */ | 197 | * dwords, then wait for not set, then continue. */ |
205 | } | 198 | } |
206 | 199 | ||
207 | return dsi_vc_send_short(intel_dsi, channel, data_type, len); | 200 | return dsi_vc_send_short(intel_dsi, channel, data_type, len, port); |
208 | } | 201 | } |
209 | 202 | ||
210 | static int dsi_vc_write_common(struct intel_dsi *intel_dsi, | 203 | static int dsi_vc_write_common(struct intel_dsi *intel_dsi, |
211 | int channel, const u8 *data, int len, | 204 | int channel, const u8 *data, int len, |
212 | enum dsi_type type) | 205 | enum dsi_type type, enum port port) |
213 | { | 206 | { |
214 | int ret; | 207 | int ret; |
215 | 208 | ||
@@ -217,50 +210,54 @@ static int dsi_vc_write_common(struct intel_dsi *intel_dsi, | |||
217 | BUG_ON(type == DSI_GENERIC); | 210 | BUG_ON(type == DSI_GENERIC); |
218 | ret = dsi_vc_send_short(intel_dsi, channel, | 211 | ret = dsi_vc_send_short(intel_dsi, channel, |
219 | MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM, | 212 | MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM, |
220 | 0); | 213 | 0, port); |
221 | } else if (len == 1) { | 214 | } else if (len == 1) { |
222 | ret = dsi_vc_send_short(intel_dsi, channel, | 215 | ret = dsi_vc_send_short(intel_dsi, channel, |
223 | type == DSI_GENERIC ? | 216 | type == DSI_GENERIC ? |
224 | MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM : | 217 | MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM : |
225 | MIPI_DSI_DCS_SHORT_WRITE, data[0]); | 218 | MIPI_DSI_DCS_SHORT_WRITE, data[0], |
219 | port); | ||
226 | } else if (len == 2) { | 220 | } else if (len == 2) { |
227 | ret = dsi_vc_send_short(intel_dsi, channel, | 221 | ret = dsi_vc_send_short(intel_dsi, channel, |
228 | type == DSI_GENERIC ? | 222 | type == DSI_GENERIC ? |
229 | MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM : | 223 | MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM : |
230 | MIPI_DSI_DCS_SHORT_WRITE_PARAM, | 224 | MIPI_DSI_DCS_SHORT_WRITE_PARAM, |
231 | (data[1] << 8) | data[0]); | 225 | (data[1] << 8) | data[0], port); |
232 | } else { | 226 | } else { |
233 | ret = dsi_vc_send_long(intel_dsi, channel, | 227 | ret = dsi_vc_send_long(intel_dsi, channel, |
234 | type == DSI_GENERIC ? | 228 | type == DSI_GENERIC ? |
235 | MIPI_DSI_GENERIC_LONG_WRITE : | 229 | MIPI_DSI_GENERIC_LONG_WRITE : |
236 | MIPI_DSI_DCS_LONG_WRITE, data, len); | 230 | MIPI_DSI_DCS_LONG_WRITE, data, len, |
231 | port); | ||
237 | } | 232 | } |
238 | 233 | ||
239 | return ret; | 234 | return ret; |
240 | } | 235 | } |
241 | 236 | ||
242 | int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel, | 237 | int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel, |
243 | const u8 *data, int len) | 238 | const u8 *data, int len, enum port port) |
244 | { | 239 | { |
245 | return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS); | 240 | return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS, |
241 | port); | ||
246 | } | 242 | } |
247 | 243 | ||
248 | int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel, | 244 | int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel, |
249 | const u8 *data, int len) | 245 | const u8 *data, int len, enum port port) |
250 | { | 246 | { |
251 | return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC); | 247 | return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC, |
248 | port); | ||
252 | } | 249 | } |
253 | 250 | ||
254 | static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi, | 251 | static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi, |
255 | int channel, u8 dcs_cmd) | 252 | int channel, u8 dcs_cmd, enum port port) |
256 | { | 253 | { |
257 | return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ, | 254 | return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ, |
258 | dcs_cmd); | 255 | dcs_cmd, port); |
259 | } | 256 | } |
260 | 257 | ||
261 | static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi, | 258 | static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi, |
262 | int channel, u8 *reqdata, | 259 | int channel, u8 *reqdata, |
263 | int reqlen) | 260 | int reqlen, enum port port) |
264 | { | 261 | { |
265 | u16 data; | 262 | u16 data; |
266 | u8 data_type; | 263 | u8 data_type; |
@@ -282,24 +279,22 @@ static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi, | |||
282 | BUG(); | 279 | BUG(); |
283 | } | 280 | } |
284 | 281 | ||
285 | return dsi_vc_send_short(intel_dsi, channel, data_type, data); | 282 | return dsi_vc_send_short(intel_dsi, channel, data_type, data, port); |
286 | } | 283 | } |
287 | 284 | ||
288 | static int dsi_read_data_return(struct intel_dsi *intel_dsi, | 285 | static int dsi_read_data_return(struct intel_dsi *intel_dsi, |
289 | u8 *buf, int buflen) | 286 | u8 *buf, int buflen, enum port port) |
290 | { | 287 | { |
291 | struct drm_encoder *encoder = &intel_dsi->base.base; | 288 | struct drm_encoder *encoder = &intel_dsi->base.base; |
292 | struct drm_device *dev = encoder->dev; | 289 | struct drm_device *dev = encoder->dev; |
293 | struct drm_i915_private *dev_priv = dev->dev_private; | 290 | struct drm_i915_private *dev_priv = dev->dev_private; |
294 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||
295 | enum pipe pipe = intel_crtc->pipe; | ||
296 | int i, len = 0; | 291 | int i, len = 0; |
297 | u32 data_reg, val; | 292 | u32 data_reg, val; |
298 | 293 | ||
299 | if (intel_dsi->hs) { | 294 | if (intel_dsi->hs) { |
300 | data_reg = MIPI_HS_GEN_DATA(pipe); | 295 | data_reg = MIPI_HS_GEN_DATA(port); |
301 | } else { | 296 | } else { |
302 | data_reg = MIPI_LP_GEN_DATA(pipe); | 297 | data_reg = MIPI_LP_GEN_DATA(port); |
303 | } | 298 | } |
304 | 299 | ||
305 | while (len < buflen) { | 300 | while (len < buflen) { |
@@ -312,13 +307,11 @@ static int dsi_read_data_return(struct intel_dsi *intel_dsi, | |||
312 | } | 307 | } |
313 | 308 | ||
314 | int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd, | 309 | int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd, |
315 | u8 *buf, int buflen) | 310 | u8 *buf, int buflen, enum port port) |
316 | { | 311 | { |
317 | struct drm_encoder *encoder = &intel_dsi->base.base; | 312 | struct drm_encoder *encoder = &intel_dsi->base.base; |
318 | struct drm_device *dev = encoder->dev; | 313 | struct drm_device *dev = encoder->dev; |
319 | struct drm_i915_private *dev_priv = dev->dev_private; | 314 | struct drm_i915_private *dev_priv = dev->dev_private; |
320 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||
321 | enum pipe pipe = intel_crtc->pipe; | ||
322 | u32 mask; | 315 | u32 mask; |
323 | int ret; | 316 | int ret; |
324 | 317 | ||
@@ -327,17 +320,17 @@ int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd, | |||
327 | * longer than MIPI_MAX_RETURN_PKT_SIZE | 320 | * longer than MIPI_MAX_RETURN_PKT_SIZE |
328 | */ | 321 | */ |
329 | 322 | ||
330 | I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL); | 323 | I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL); |
331 | 324 | ||
332 | ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd); | 325 | ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd, port); |
333 | if (ret) | 326 | if (ret) |
334 | return ret; | 327 | return ret; |
335 | 328 | ||
336 | mask = GEN_READ_DATA_AVAIL; | 329 | mask = GEN_READ_DATA_AVAIL; |
337 | if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50)) | 330 | if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 50)) |
338 | DRM_ERROR("Timeout waiting for read data.\n"); | 331 | DRM_ERROR("Timeout waiting for read data.\n"); |
339 | 332 | ||
340 | ret = dsi_read_data_return(intel_dsi, buf, buflen); | 333 | ret = dsi_read_data_return(intel_dsi, buf, buflen, port); |
341 | if (ret < 0) | 334 | if (ret < 0) |
342 | return ret; | 335 | return ret; |
343 | 336 | ||
@@ -348,13 +341,11 @@ int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd, | |||
348 | } | 341 | } |
349 | 342 | ||
350 | int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel, | 343 | int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel, |
351 | u8 *reqdata, int reqlen, u8 *buf, int buflen) | 344 | u8 *reqdata, int reqlen, u8 *buf, int buflen, enum port port) |
352 | { | 345 | { |
353 | struct drm_encoder *encoder = &intel_dsi->base.base; | 346 | struct drm_encoder *encoder = &intel_dsi->base.base; |
354 | struct drm_device *dev = encoder->dev; | 347 | struct drm_device *dev = encoder->dev; |
355 | struct drm_i915_private *dev_priv = dev->dev_private; | 348 | struct drm_i915_private *dev_priv = dev->dev_private; |
356 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | ||
357 | enum pipe pipe = intel_crtc->pipe; | ||
358 | u32 mask; | 349 | u32 mask; |
359 | int ret; | 350 | int ret; |
360 | 351 | ||
@@ -363,18 +354,18 @@ int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel, | |||
363 | * longer than MIPI_MAX_RETURN_PKT_SIZE | 354 | * longer than MIPI_MAX_RETURN_PKT_SIZE |
364 | */ | 355 | */ |
365 | 356 | ||
366 | I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL); | 357 | I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL); |
367 | 358 | ||
368 | ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata, | 359 | ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata, |
369 | reqlen); | 360 | reqlen, port); |
370 | if (ret) | 361 | if (ret) |
371 | return ret; | 362 | return ret; |
372 | 363 | ||
373 | mask = GEN_READ_DATA_AVAIL; | 364 | mask = GEN_READ_DATA_AVAIL; |
374 | if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50)) | 365 | if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 50)) |
375 | DRM_ERROR("Timeout waiting for read data.\n"); | 366 | DRM_ERROR("Timeout waiting for read data.\n"); |
376 | 367 | ||
377 | ret = dsi_read_data_return(intel_dsi, buf, buflen); | 368 | ret = dsi_read_data_return(intel_dsi, buf, buflen, port); |
378 | if (ret < 0) | 369 | if (ret < 0) |
379 | return ret; | 370 | return ret; |
380 | 371 | ||
@@ -394,8 +385,7 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs) | |||
394 | struct drm_encoder *encoder = &intel_dsi->base.base; | 385 | struct drm_encoder *encoder = &intel_dsi->base.base; |
395 | struct drm_device *dev = encoder->dev; | 386 | struct drm_device *dev = encoder->dev; |
396 | struct drm_i915_private *dev_priv = dev->dev_private; | 387 | struct drm_i915_private *dev_priv = dev->dev_private; |
397 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 388 | enum port port; |
398 | enum pipe pipe = intel_crtc->pipe; | ||
399 | u32 mask; | 389 | u32 mask; |
400 | 390 | ||
401 | /* XXX: pipe, hs */ | 391 | /* XXX: pipe, hs */ |
@@ -404,18 +394,23 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs) | |||
404 | else | 394 | else |
405 | cmd |= DPI_LP_MODE; | 395 | cmd |= DPI_LP_MODE; |
406 | 396 | ||
407 | /* clear bit */ | 397 | for_each_dsi_port(port, intel_dsi->ports) { |
408 | I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT); | 398 | /* clear bit */ |
399 | I915_WRITE(MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT); | ||
409 | 400 | ||
410 | /* XXX: old code skips write if control unchanged */ | 401 | /* XXX: old code skips write if control unchanged */ |
411 | if (cmd == I915_READ(MIPI_DPI_CONTROL(pipe))) | 402 | if (cmd == I915_READ(MIPI_DPI_CONTROL(port))) |
412 | DRM_ERROR("Same special packet %02x twice in a row.\n", cmd); | 403 | DRM_ERROR("Same special packet %02x twice in a row.\n", |
404 | cmd); | ||
413 | 405 | ||
414 | I915_WRITE(MIPI_DPI_CONTROL(pipe), cmd); | 406 | I915_WRITE(MIPI_DPI_CONTROL(port), cmd); |
415 | 407 | ||
416 | mask = SPL_PKT_SENT_INTERRUPT; | 408 | mask = SPL_PKT_SENT_INTERRUPT; |
417 | if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 100)) | 409 | if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, |
418 | DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd); | 410 | 100)) |
411 | DRM_ERROR("Video mode command 0x%08x send failed.\n", | ||
412 | cmd); | ||
413 | } | ||
419 | 414 | ||
420 | return 0; | 415 | return 0; |
421 | } | 416 | } |
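With the for_each_dsi_port() loop above, dpi_send_cmd() now broadcasts the special packet to every enabled link, so a dual link panel receives it on both PORT_A and PORT_C. A minimal caller sketch, assuming the TURN_ON special packet bit from i915_reg.h:

	/* sends the turn-on packet in LP mode on every enabled DSI port */
	dpi_send_cmd(intel_dsi, TURN_ON, DPI_LP_MODE_EN);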
@@ -426,12 +421,12 @@ void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi) | |||
426 | struct drm_device *dev = encoder->dev; | 421 | struct drm_device *dev = encoder->dev; |
427 | struct drm_i915_private *dev_priv = dev->dev_private; | 422 | struct drm_i915_private *dev_priv = dev->dev_private; |
428 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); | 423 | struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
429 | enum pipe pipe = intel_crtc->pipe; | 424 | enum port port = intel_dsi_pipe_to_port(intel_crtc->pipe); |
430 | u32 mask; | 425 | u32 mask; |
431 | 426 | ||
432 | mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY | | 427 | mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY | |
433 | LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY; | 428 | LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY; |
434 | 429 | ||
435 | if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 100)) | 430 | if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 100)) |
436 | DRM_ERROR("DPI FIFOs are not empty\n"); | 431 | DRM_ERROR("DPI FIFOs are not empty\n"); |
437 | } | 432 | } |
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h index 46aa1acc00eb..326a5ac55561 100644 --- a/drivers/gpu/drm/i915/intel_dsi_cmd.h +++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h | |||
@@ -36,77 +36,81 @@ | |||
36 | #define DPI_LP_MODE_EN false | 36 | #define DPI_LP_MODE_EN false |
37 | #define DPI_HS_MODE_EN true | 37 | #define DPI_HS_MODE_EN true |
38 | 38 | ||
39 | void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable); | 39 | void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable, |
40 | enum port port); | ||
40 | 41 | ||
41 | int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel, | 42 | int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel, |
42 | const u8 *data, int len); | 43 | const u8 *data, int len, enum port port); |
43 | 44 | ||
44 | int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel, | 45 | int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel, |
45 | const u8 *data, int len); | 46 | const u8 *data, int len, enum port port); |
46 | 47 | ||
47 | int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd, | 48 | int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd, |
48 | u8 *buf, int buflen); | 49 | u8 *buf, int buflen, enum port port); |
49 | 50 | ||
50 | int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel, | 51 | int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel, |
51 | u8 *reqdata, int reqlen, u8 *buf, int buflen); | 52 | u8 *reqdata, int reqlen, u8 *buf, int buflen, enum port port); |
52 | 53 | ||
53 | int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs); | 54 | int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs); |
54 | void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi); | 55 | void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi); |
55 | 56 | ||
56 | /* XXX: questionable write helpers */ | 57 | /* XXX: questionable write helpers */ |
57 | static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi, | 58 | static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi, |
58 | int channel, u8 dcs_cmd) | 59 | int channel, u8 dcs_cmd, enum port port) |
59 | { | 60 | { |
60 | return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1); | 61 | return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1, port); |
61 | } | 62 | } |
62 | 63 | ||
63 | static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi, | 64 | static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi, |
64 | int channel, u8 dcs_cmd, u8 param) | 65 | int channel, u8 dcs_cmd, u8 param, enum port port) |
65 | { | 66 | { |
66 | u8 buf[2] = { dcs_cmd, param }; | 67 | u8 buf[2] = { dcs_cmd, param }; |
67 | return dsi_vc_dcs_write(intel_dsi, channel, buf, 2); | 68 | return dsi_vc_dcs_write(intel_dsi, channel, buf, 2, port); |
68 | } | 69 | } |
69 | 70 | ||
70 | static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi, | 71 | static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi, |
71 | int channel) | 72 | int channel, enum port port) |
72 | { | 73 | { |
73 | return dsi_vc_generic_write(intel_dsi, channel, NULL, 0); | 74 | return dsi_vc_generic_write(intel_dsi, channel, NULL, 0, port); |
74 | } | 75 | } |
75 | 76 | ||
76 | static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi, | 77 | static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi, |
77 | int channel, u8 param) | 78 | int channel, u8 param, enum port port) |
78 | { | 79 | { |
79 | return dsi_vc_generic_write(intel_dsi, channel, ¶m, 1); | 80 | return dsi_vc_generic_write(intel_dsi, channel, ¶m, 1, port); |
80 | } | 81 | } |
81 | 82 | ||
82 | static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi, | 83 | static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi, |
83 | int channel, u8 param1, u8 param2) | 84 | int channel, u8 param1, u8 param2, enum port port) |
84 | { | 85 | { |
85 | u8 buf[2] = { param1, param2 }; | 86 | u8 buf[2] = { param1, param2 }; |
86 | return dsi_vc_generic_write(intel_dsi, channel, buf, 2); | 87 | return dsi_vc_generic_write(intel_dsi, channel, buf, 2, port); |
87 | } | 88 | } |
88 | 89 | ||
89 | /* XXX: questionable read helpers */ | 90 | /* XXX: questionable read helpers */ |
90 | static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi, | 91 | static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi, |
91 | int channel, u8 *buf, int buflen) | 92 | int channel, u8 *buf, int buflen, enum port port) |
92 | { | 93 | { |
93 | return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen); | 94 | return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen, |
95 | port); | ||
94 | } | 96 | } |
95 | 97 | ||
96 | static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi, | 98 | static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi, |
97 | int channel, u8 param, u8 *buf, | 99 | int channel, u8 param, u8 *buf, |
98 | int buflen) | 100 | int buflen, enum port port) |
99 | { | 101 | { |
100 | return dsi_vc_generic_read(intel_dsi, channel, ¶m, 1, buf, buflen); | 102 | return dsi_vc_generic_read(intel_dsi, channel, ¶m, 1, buf, buflen, |
103 | port); | ||
101 | } | 104 | } |
102 | 105 | ||
103 | static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi, | 106 | static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi, |
104 | int channel, u8 param1, u8 param2, | 107 | int channel, u8 param1, u8 param2, |
105 | u8 *buf, int buflen) | 108 | u8 *buf, int buflen, enum port port) |
106 | { | 109 | { |
107 | u8 req[2] = { param1, param2 }; | 110 | u8 req[2] = { param1, param2 }; |
108 | 111 | ||
109 | return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen); | 112 | return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen, |
113 | port); | ||
110 | } | 114 | } |
111 | 115 | ||
112 | 116 | ||
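Every helper above now takes the destination port explicitly, so panel code that drives both links sends each command once per port. A minimal sketch, assuming the standard DCS opcodes from include/video/mipi_display.h:

	enum port port;

	for_each_dsi_port(port, intel_dsi->ports) {
		/* virtual channel 0, one command per enabled DSI port */
		dsi_vc_dcs_write_0(intel_dsi, 0, MIPI_DCS_EXIT_SLEEP_MODE, port);
		dsi_vc_dcs_write_0(intel_dsi, 0, MIPI_DCS_SET_DISPLAY_ON, port);
	}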
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index f6bdd44069ce..5493aef5a6a3 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | |||
@@ -94,16 +94,31 @@ static struct gpio_table gtable[] = { | |||
94 | { GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0} | 94 | { GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0} |
95 | }; | 95 | }; |
96 | 96 | ||
97 | static inline enum port intel_dsi_seq_port_to_port(u8 port) | ||
98 | { | ||
99 | return port ? PORT_C : PORT_A; | ||
100 | } | ||
101 | |||
97 | static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data) | 102 | static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data) |
98 | { | 103 | { |
99 | u8 type, byte, mode, vc, port; | 104 | u8 type, byte, mode, vc, seq_port; |
100 | u16 len; | 105 | u16 len; |
106 | enum port port; | ||
101 | 107 | ||
102 | byte = *data++; | 108 | byte = *data++; |
103 | mode = (byte >> MIPI_TRANSFER_MODE_SHIFT) & 0x1; | 109 | mode = (byte >> MIPI_TRANSFER_MODE_SHIFT) & 0x1; |
104 | vc = (byte >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 0x3; | 110 | vc = (byte >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 0x3; |
105 | port = (byte >> MIPI_PORT_SHIFT) & 0x3; | 111 | seq_port = (byte >> MIPI_PORT_SHIFT) & 0x3; |
106 | 112 | ||
113 | /* For single link DSI on Port A and Port C, the seq_port value | ||
114 | * parsed from Sequence Block #53 of the VBT is set to 0. | ||
115 | * Reads/writes of packets for single link DSI on Port A and | ||
116 | * Port C will then be based on the DVO port from VBT block 2. | ||
117 | */ | ||
118 | if (intel_dsi->ports == (1 << PORT_C)) | ||
119 | port = PORT_C; | ||
120 | else | ||
121 | port = intel_dsi_seq_port_to_port(seq_port); | ||
107 | /* LP or HS mode */ | 122 | /* LP or HS mode */ |
108 | intel_dsi->hs = mode; | 123 | intel_dsi->hs = mode; |
109 | 124 | ||
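For reference, a worked decode of one sequence header byte, assuming the shift values defined earlier in this file (transfer mode in bit 0, virtual channel in bits 1:2, port in bits 3:4):

	u8 byte = 0x0b;			/* 0b01011 */
	mode	 = (byte >> 0) & 0x1;	/* 1 -> HS transfer */
	vc	 = (byte >> 1) & 0x3;	/* 1 -> virtual channel 1 */
	seq_port = (byte >> 3) & 0x3;	/* 1 -> PORT_C via the helper above */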
@@ -115,13 +130,13 @@ static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data) | |||
115 | 130 | ||
116 | switch (type) { | 131 | switch (type) { |
117 | case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM: | 132 | case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM: |
118 | dsi_vc_generic_write_0(intel_dsi, vc); | 133 | dsi_vc_generic_write_0(intel_dsi, vc, port); |
119 | break; | 134 | break; |
120 | case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM: | 135 | case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM: |
121 | dsi_vc_generic_write_1(intel_dsi, vc, *data); | 136 | dsi_vc_generic_write_1(intel_dsi, vc, *data, port); |
122 | break; | 137 | break; |
123 | case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM: | 138 | case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM: |
124 | dsi_vc_generic_write_2(intel_dsi, vc, *data, *(data + 1)); | 139 | dsi_vc_generic_write_2(intel_dsi, vc, *data, *(data + 1), port); |
125 | break; | 140 | break; |
126 | case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM: | 141 | case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM: |
127 | case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM: | 142 | case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM: |
@@ -129,19 +144,19 @@ static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data) | |||
129 | DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n"); | 144 | DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n"); |
130 | break; | 145 | break; |
131 | case MIPI_DSI_GENERIC_LONG_WRITE: | 146 | case MIPI_DSI_GENERIC_LONG_WRITE: |
132 | dsi_vc_generic_write(intel_dsi, vc, data, len); | 147 | dsi_vc_generic_write(intel_dsi, vc, data, len, port); |
133 | break; | 148 | break; |
134 | case MIPI_DSI_DCS_SHORT_WRITE: | 149 | case MIPI_DSI_DCS_SHORT_WRITE: |
135 | dsi_vc_dcs_write_0(intel_dsi, vc, *data); | 150 | dsi_vc_dcs_write_0(intel_dsi, vc, *data, port); |
136 | break; | 151 | break; |
137 | case MIPI_DSI_DCS_SHORT_WRITE_PARAM: | 152 | case MIPI_DSI_DCS_SHORT_WRITE_PARAM: |
138 | dsi_vc_dcs_write_1(intel_dsi, vc, *data, *(data + 1)); | 153 | dsi_vc_dcs_write_1(intel_dsi, vc, *data, *(data + 1), port); |
139 | break; | 154 | break; |
140 | case MIPI_DSI_DCS_READ: | 155 | case MIPI_DSI_DCS_READ: |
141 | DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n"); | 156 | DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n"); |
142 | break; | 157 | break; |
143 | case MIPI_DSI_DCS_LONG_WRITE: | 158 | case MIPI_DSI_DCS_LONG_WRITE: |
144 | dsi_vc_dcs_write(intel_dsi, vc, data, len); | 159 | dsi_vc_dcs_write(intel_dsi, vc, data, len, port); |
145 | break; | 160 | break; |
146 | } | 161 | } |
147 | 162 | ||
@@ -280,6 +295,11 @@ static bool generic_init(struct intel_dsi_device *dsi) | |||
280 | intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0; | 295 | intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0; |
281 | intel_dsi->lane_count = mipi_config->lane_cnt + 1; | 296 | intel_dsi->lane_count = mipi_config->lane_cnt + 1; |
282 | intel_dsi->pixel_format = mipi_config->videomode_color_format << 7; | 297 | intel_dsi->pixel_format = mipi_config->videomode_color_format << 7; |
298 | intel_dsi->dual_link = mipi_config->dual_link; | ||
299 | intel_dsi->pixel_overlap = mipi_config->pixel_overlap; | ||
300 | |||
301 | if (intel_dsi->dual_link) | ||
302 | intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C)); | ||
283 | 303 | ||
284 | if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666) | 304 | if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666) |
285 | bits_per_pixel = 18; | 305 | bits_per_pixel = 18; |
@@ -299,6 +319,20 @@ static bool generic_init(struct intel_dsi_device *dsi) | |||
299 | 319 | ||
300 | pclk = mode->clock; | 320 | pclk = mode->clock; |
301 | 321 | ||
322 | /* In dual link mode each port needs half of the pixel clock */ | ||
323 | if (intel_dsi->dual_link) { | ||
324 | pclk = pclk / 2; | ||
325 | |||
326 | /* pixel_overlap can be enabled if the panel needs it. In that | ||
327 | * case the pixel clock must be increased for the extra pixels. | ||
328 | */ | ||
329 | if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { | ||
330 | pclk += DIV_ROUND_UP(mode->vtotal * | ||
331 | intel_dsi->pixel_overlap * | ||
332 | 60, 1000); | ||
333 | } | ||
334 | } | ||
335 | |||
302 | /* Burst Mode Ratio | 336 | /* Burst Mode Ratio |
303 | * Target ddr frequency from VBT / non burst ddr freq | 337 | * Target ddr frequency from VBT / non burst ddr freq |
304 | * multiply by 100 to preserve remainder | 338 | * multiply by 100 to preserve remainder |
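A worked example of the dual link adjustment above, with hypothetical panel numbers (note the code assumes a 60 Hz refresh rate): for mode->clock = 148500 kHz, vtotal = 1235 and pixel_overlap = 4,

	pclk = 148500 / 2;				/* 74250 kHz per link */
	pclk += DIV_ROUND_UP(1235 * 4 * 60, 1000);	/* +297 kHz for overlap pixels */
	/* each port ends up programmed for 74547 kHz */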
@@ -493,6 +527,12 @@ static bool generic_init(struct intel_dsi_device *dsi) | |||
493 | DRM_DEBUG_KMS("Clockstop %s\n", intel_dsi->clock_stop ? | 527 | DRM_DEBUG_KMS("Clockstop %s\n", intel_dsi->clock_stop ? |
494 | "disabled" : "enabled"); | 528 | "disabled" : "enabled"); |
495 | DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video"); | 529 | DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video"); |
530 | if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) | ||
531 | DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n"); | ||
532 | else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT) | ||
533 | DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_PIXEL_ALT\n"); | ||
534 | else | ||
535 | DRM_DEBUG_KMS("Dual link: NONE\n"); | ||
496 | DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format); | 536 | DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format); |
497 | DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div); | 537 | DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div); |
498 | DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout); | 538 | DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout); |
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c index fa7a6ca34cd6..3622d0bafdf8 100644 --- a/drivers/gpu/drm/i915/intel_dsi_pll.c +++ b/drivers/gpu/drm/i915/intel_dsi_pll.c | |||
@@ -241,7 +241,11 @@ static void vlv_configure_dsi_pll(struct intel_encoder *encoder) | |||
241 | return; | 241 | return; |
242 | } | 242 | } |
243 | 243 | ||
244 | dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL; | 244 | if (intel_dsi->ports & (1 << PORT_A)) |
245 | dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL; | ||
246 | |||
247 | if (intel_dsi->ports & (1 << PORT_C)) | ||
248 | dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL; | ||
245 | 249 | ||
246 | DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n", | 250 | DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n", |
247 | dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl); | 251 | dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl); |
@@ -269,12 +273,14 @@ void vlv_enable_dsi_pll(struct intel_encoder *encoder) | |||
269 | tmp |= DSI_PLL_VCO_EN; | 273 | tmp |= DSI_PLL_VCO_EN; |
270 | vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp); | 274 | vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp); |
271 | 275 | ||
272 | mutex_unlock(&dev_priv->dpio_lock); | 276 | if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) & |
277 | DSI_PLL_LOCK, 20)) { | ||
273 | 278 | ||
274 | if (wait_for(I915_READ(PIPECONF(PIPE_A)) & PIPECONF_DSI_PLL_LOCKED, 20)) { | 279 | mutex_unlock(&dev_priv->dpio_lock); |
275 | DRM_ERROR("DSI PLL lock failed\n"); | 280 | DRM_ERROR("DSI PLL lock failed\n"); |
276 | return; | 281 | return; |
277 | } | 282 | } |
283 | mutex_unlock(&dev_priv->dpio_lock); | ||
278 | 284 | ||
279 | DRM_DEBUG_KMS("DSI PLL locked\n"); | 285 | DRM_DEBUG_KMS("DSI PLL locked\n"); |
280 | } | 286 | } |
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c new file mode 100644 index 000000000000..4daceaeeb30d --- /dev/null +++ b/drivers/gpu/drm/i915/intel_fbc.c | |||
@@ -0,0 +1,701 @@ | |||
1 | /* | ||
2 | * Copyright © 2014 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | */ | ||
23 | |||
24 | /** | ||
25 | * DOC: Frame Buffer Compression (FBC) | ||
26 | * | ||
27 | * FBC tries to save memory bandwidth (and so power consumption) by | ||
28 | * compressing the memory used by the display. It is totally | ||
29 | * transparent to user space and completely handled in the kernel. | ||
30 | * | ||
31 | * The benefits of FBC are mostly visible with solid backgrounds and | ||
32 | * variation-less patterns. They come from keeping the memory footprint small | ||
33 | * and having fewer memory pages opened and accessed for refreshing the display. | ||
34 | * | ||
35 | * i915 is responsible for reserving stolen memory for FBC and configuring | ||
36 | * its offset in the proper registers. The hardware takes care of all the | ||
37 | * compression and decompression. However there are many known cases where we have to | ||
38 | * forcibly disable it to allow proper screen updates. | ||
39 | */ | ||
40 | |||
41 | #include "intel_drv.h" | ||
42 | #include "i915_drv.h" | ||
43 | |||
44 | static void i8xx_fbc_disable(struct drm_device *dev) | ||
45 | { | ||
46 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
47 | u32 fbc_ctl; | ||
48 | |||
49 | dev_priv->fbc.enabled = false; | ||
50 | |||
51 | /* Disable compression */ | ||
52 | fbc_ctl = I915_READ(FBC_CONTROL); | ||
53 | if ((fbc_ctl & FBC_CTL_EN) == 0) | ||
54 | return; | ||
55 | |||
56 | fbc_ctl &= ~FBC_CTL_EN; | ||
57 | I915_WRITE(FBC_CONTROL, fbc_ctl); | ||
58 | |||
59 | /* Wait for compressing bit to clear */ | ||
60 | if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { | ||
61 | DRM_DEBUG_KMS("FBC idle timed out\n"); | ||
62 | return; | ||
63 | } | ||
64 | |||
65 | DRM_DEBUG_KMS("disabled FBC\n"); | ||
66 | } | ||
67 | |||
68 | static void i8xx_fbc_enable(struct drm_crtc *crtc) | ||
69 | { | ||
70 | struct drm_device *dev = crtc->dev; | ||
71 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
72 | struct drm_framebuffer *fb = crtc->primary->fb; | ||
73 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | ||
74 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
75 | int cfb_pitch; | ||
76 | int i; | ||
77 | u32 fbc_ctl; | ||
78 | |||
79 | dev_priv->fbc.enabled = true; | ||
80 | |||
81 | cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE; | ||
82 | if (fb->pitches[0] < cfb_pitch) | ||
83 | cfb_pitch = fb->pitches[0]; | ||
84 | |||
85 | /* FBC_CTL wants 32B or 64B units */ | ||
86 | if (IS_GEN2(dev)) | ||
87 | cfb_pitch = (cfb_pitch / 32) - 1; | ||
88 | else | ||
89 | cfb_pitch = (cfb_pitch / 64) - 1; | ||
90 | |||
91 | /* Clear old tags */ | ||
92 | for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) | ||
93 | I915_WRITE(FBC_TAG + (i * 4), 0); | ||
94 | |||
95 | if (IS_GEN4(dev)) { | ||
96 | u32 fbc_ctl2; | ||
97 | |||
98 | /* Set it up... */ | ||
99 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; | ||
100 | fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane); | ||
101 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); | ||
102 | I915_WRITE(FBC_FENCE_OFF, crtc->y); | ||
103 | } | ||
104 | |||
105 | /* enable it... */ | ||
106 | fbc_ctl = I915_READ(FBC_CONTROL); | ||
107 | fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT; | ||
108 | fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC; | ||
109 | if (IS_I945GM(dev)) | ||
110 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ | ||
111 | fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; | ||
112 | fbc_ctl |= obj->fence_reg; | ||
113 | I915_WRITE(FBC_CONTROL, fbc_ctl); | ||
114 | |||
115 | DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n", | ||
116 | cfb_pitch, crtc->y, plane_name(intel_crtc->plane)); | ||
117 | } | ||
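A worked example of the pitch conversion above, with an assumed scanout: a 1024x768 XRGB framebuffer has fb->pitches[0] = 4096 bytes, so if the compressed buffer is large enough that the FBC_LL_SIZE clamp does not kick in:

	cfb_pitch = (4096 / 32) - 1;	/* 127 on gen2 (32 byte units) */
	cfb_pitch = (4096 / 64) - 1;	/* 63 on gen3/gen4 (64 byte units) */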
118 | |||
119 | static bool i8xx_fbc_enabled(struct drm_device *dev) | ||
120 | { | ||
121 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
122 | |||
123 | return I915_READ(FBC_CONTROL) & FBC_CTL_EN; | ||
124 | } | ||
125 | |||
126 | static void g4x_fbc_enable(struct drm_crtc *crtc) | ||
127 | { | ||
128 | struct drm_device *dev = crtc->dev; | ||
129 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
130 | struct drm_framebuffer *fb = crtc->primary->fb; | ||
131 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | ||
132 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
133 | u32 dpfc_ctl; | ||
134 | |||
135 | dev_priv->fbc.enabled = true; | ||
136 | |||
137 | dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN; | ||
138 | if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) | ||
139 | dpfc_ctl |= DPFC_CTL_LIMIT_2X; | ||
140 | else | ||
141 | dpfc_ctl |= DPFC_CTL_LIMIT_1X; | ||
142 | dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; | ||
143 | |||
144 | I915_WRITE(DPFC_FENCE_YOFF, crtc->y); | ||
145 | |||
146 | /* enable it... */ | ||
147 | I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); | ||
148 | |||
149 | DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); | ||
150 | } | ||
151 | |||
152 | static void g4x_fbc_disable(struct drm_device *dev) | ||
153 | { | ||
154 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
155 | u32 dpfc_ctl; | ||
156 | |||
157 | dev_priv->fbc.enabled = false; | ||
158 | |||
159 | /* Disable compression */ | ||
160 | dpfc_ctl = I915_READ(DPFC_CONTROL); | ||
161 | if (dpfc_ctl & DPFC_CTL_EN) { | ||
162 | dpfc_ctl &= ~DPFC_CTL_EN; | ||
163 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); | ||
164 | |||
165 | DRM_DEBUG_KMS("disabled FBC\n"); | ||
166 | } | ||
167 | } | ||
168 | |||
169 | static bool g4x_fbc_enabled(struct drm_device *dev) | ||
170 | { | ||
171 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
172 | |||
173 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; | ||
174 | } | ||
175 | |||
176 | static void snb_fbc_blit_update(struct drm_device *dev) | ||
177 | { | ||
178 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
179 | u32 blt_ecoskpd; | ||
180 | |||
181 | /* Make sure blitter notifies FBC of writes */ | ||
182 | |||
183 | /* The blitter is part of the Media power well on VLV. No impact of | ||
184 | * this param on other platforms for now. */ | ||
185 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA); | ||
186 | |||
187 | blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); | ||
188 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << | ||
189 | GEN6_BLITTER_LOCK_SHIFT; | ||
190 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
191 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY; | ||
192 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
193 | blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY << | ||
194 | GEN6_BLITTER_LOCK_SHIFT); | ||
195 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
196 | POSTING_READ(GEN6_BLITTER_ECOSKPD); | ||
197 | |||
198 | gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA); | ||
199 | } | ||
200 | |||
201 | static void ilk_fbc_enable(struct drm_crtc *crtc) | ||
202 | { | ||
203 | struct drm_device *dev = crtc->dev; | ||
204 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
205 | struct drm_framebuffer *fb = crtc->primary->fb; | ||
206 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | ||
207 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
208 | u32 dpfc_ctl; | ||
209 | |||
210 | dev_priv->fbc.enabled = true; | ||
211 | |||
212 | dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane); | ||
213 | if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) | ||
214 | dev_priv->fbc.threshold++; | ||
215 | |||
216 | switch (dev_priv->fbc.threshold) { | ||
217 | case 4: | ||
218 | case 3: | ||
219 | dpfc_ctl |= DPFC_CTL_LIMIT_4X; | ||
220 | break; | ||
221 | case 2: | ||
222 | dpfc_ctl |= DPFC_CTL_LIMIT_2X; | ||
223 | break; | ||
224 | case 1: | ||
225 | dpfc_ctl |= DPFC_CTL_LIMIT_1X; | ||
226 | break; | ||
227 | } | ||
228 | dpfc_ctl |= DPFC_CTL_FENCE_EN; | ||
229 | if (IS_GEN5(dev)) | ||
230 | dpfc_ctl |= obj->fence_reg; | ||
231 | |||
232 | I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); | ||
233 | I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID); | ||
234 | /* enable it... */ | ||
235 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); | ||
236 | |||
237 | if (IS_GEN6(dev)) { | ||
238 | I915_WRITE(SNB_DPFC_CTL_SA, | ||
239 | SNB_CPU_FENCE_ENABLE | obj->fence_reg); | ||
240 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); | ||
241 | snb_fbc_blit_update(dev); | ||
242 | } | ||
243 | |||
244 | DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); | ||
245 | } | ||
246 | |||
247 | static void ilk_fbc_disable(struct drm_device *dev) | ||
248 | { | ||
249 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
250 | u32 dpfc_ctl; | ||
251 | |||
252 | dev_priv->fbc.enabled = false; | ||
253 | |||
254 | /* Disable compression */ | ||
255 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | ||
256 | if (dpfc_ctl & DPFC_CTL_EN) { | ||
257 | dpfc_ctl &= ~DPFC_CTL_EN; | ||
258 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); | ||
259 | |||
260 | DRM_DEBUG_KMS("disabled FBC\n"); | ||
261 | } | ||
262 | } | ||
263 | |||
264 | static bool ilk_fbc_enabled(struct drm_device *dev) | ||
265 | { | ||
266 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
267 | |||
268 | return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; | ||
269 | } | ||
270 | |||
271 | static void gen7_fbc_enable(struct drm_crtc *crtc) | ||
272 | { | ||
273 | struct drm_device *dev = crtc->dev; | ||
274 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
275 | struct drm_framebuffer *fb = crtc->primary->fb; | ||
276 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | ||
277 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
278 | u32 dpfc_ctl; | ||
279 | |||
280 | dev_priv->fbc.enabled = true; | ||
281 | |||
282 | dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane); | ||
283 | if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) | ||
284 | dev_priv->fbc.threshold++; | ||
285 | |||
286 | switch (dev_priv->fbc.threshold) { | ||
287 | case 4: | ||
288 | case 3: | ||
289 | dpfc_ctl |= DPFC_CTL_LIMIT_4X; | ||
290 | break; | ||
291 | case 2: | ||
292 | dpfc_ctl |= DPFC_CTL_LIMIT_2X; | ||
293 | break; | ||
294 | case 1: | ||
295 | dpfc_ctl |= DPFC_CTL_LIMIT_1X; | ||
296 | break; | ||
297 | } | ||
298 | |||
299 | dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; | ||
300 | |||
301 | if (dev_priv->fbc.false_color) | ||
302 | dpfc_ctl |= FBC_CTL_FALSE_COLOR; | ||
303 | |||
304 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); | ||
305 | |||
306 | if (IS_IVYBRIDGE(dev)) { | ||
307 | /* WaFbcAsynchFlipDisableFbcQueue:ivb */ | ||
308 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | ||
309 | I915_READ(ILK_DISPLAY_CHICKEN1) | | ||
310 | ILK_FBCQ_DIS); | ||
311 | } else { | ||
312 | /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ | ||
313 | I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe), | ||
314 | I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) | | ||
315 | HSW_FBCQ_DIS); | ||
316 | } | ||
317 | |||
318 | I915_WRITE(SNB_DPFC_CTL_SA, | ||
319 | SNB_CPU_FENCE_ENABLE | obj->fence_reg); | ||
320 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); | ||
321 | |||
322 | snb_fbc_blit_update(dev); | ||
323 | |||
324 | DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); | ||
325 | } | ||
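The switch above (shared with ilk_fbc_enable()) maps the compression threshold picked by the stolen memory allocator to a hardware ratio limit; a sketch of the assumed relationship:

	/* threshold 1   -> DPFC_CTL_LIMIT_1X: CFB as large as the scanout
	 * threshold 2   -> DPFC_CTL_LIMIT_2X: CFB half the scanout size
	 * threshold 3/4 -> DPFC_CTL_LIMIT_4X: CFB a quarter of the size
	 * (the threshold is bumped first for 16 bpp formats, see above)
	 */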
326 | |||
327 | /** | ||
328 | * intel_fbc_enabled - Is FBC enabled? | ||
329 | * @dev: the drm_device | ||
330 | * | ||
331 | * This function is used to verify the current state of FBC. | ||
332 | * FIXME: This should be tracked in the plane config eventually | ||
333 | * instead of queried at runtime for most callers. | ||
334 | */ | ||
335 | bool intel_fbc_enabled(struct drm_device *dev) | ||
336 | { | ||
337 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
338 | |||
339 | return dev_priv->fbc.enabled; | ||
340 | } | ||
341 | |||
342 | void bdw_fbc_sw_flush(struct drm_device *dev, u32 value) | ||
343 | { | ||
344 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
345 | |||
346 | if (!IS_GEN8(dev)) | ||
347 | return; | ||
348 | |||
349 | if (!intel_fbc_enabled(dev)) | ||
350 | return; | ||
351 | |||
352 | I915_WRITE(MSG_FBC_REND_STATE, value); | ||
353 | } | ||
354 | |||
355 | static void intel_fbc_work_fn(struct work_struct *__work) | ||
356 | { | ||
357 | struct intel_fbc_work *work = | ||
358 | container_of(to_delayed_work(__work), | ||
359 | struct intel_fbc_work, work); | ||
360 | struct drm_device *dev = work->crtc->dev; | ||
361 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
362 | |||
363 | mutex_lock(&dev->struct_mutex); | ||
364 | if (work == dev_priv->fbc.fbc_work) { | ||
365 | /* Double check that we haven't switched fb without cancelling | ||
366 | * the prior work. | ||
367 | */ | ||
368 | if (work->crtc->primary->fb == work->fb) { | ||
369 | dev_priv->display.enable_fbc(work->crtc); | ||
370 | |||
371 | dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane; | ||
372 | dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id; | ||
373 | dev_priv->fbc.y = work->crtc->y; | ||
374 | } | ||
375 | |||
376 | dev_priv->fbc.fbc_work = NULL; | ||
377 | } | ||
378 | mutex_unlock(&dev->struct_mutex); | ||
379 | |||
380 | kfree(work); | ||
381 | } | ||
382 | |||
383 | static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv) | ||
384 | { | ||
385 | if (dev_priv->fbc.fbc_work == NULL) | ||
386 | return; | ||
387 | |||
388 | DRM_DEBUG_KMS("cancelling pending FBC enable\n"); | ||
389 | |||
390 | /* Synchronisation is provided by struct_mutex and checking of | ||
391 | * dev_priv->fbc.fbc_work, so we can perform the cancellation | ||
392 | * entirely asynchronously. | ||
393 | */ | ||
394 | if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work)) | ||
395 | /* tasklet was killed before being run, clean up */ | ||
396 | kfree(dev_priv->fbc.fbc_work); | ||
397 | |||
398 | /* Mark the work as no longer wanted so that if it does | ||
399 | * wake up (because the work was already running and waiting | ||
400 | * for our mutex), it will discover that it is no longer | ||
401 | * necessary to run. | ||
402 | */ | ||
403 | dev_priv->fbc.fbc_work = NULL; | ||
404 | } | ||
405 | |||
406 | static void intel_fbc_enable(struct drm_crtc *crtc) | ||
407 | { | ||
408 | struct intel_fbc_work *work; | ||
409 | struct drm_device *dev = crtc->dev; | ||
410 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
411 | |||
412 | if (!dev_priv->display.enable_fbc) | ||
413 | return; | ||
414 | |||
415 | intel_fbc_cancel_work(dev_priv); | ||
416 | |||
417 | work = kzalloc(sizeof(*work), GFP_KERNEL); | ||
418 | if (work == NULL) { | ||
419 | DRM_ERROR("Failed to allocate FBC work structure\n"); | ||
420 | dev_priv->display.enable_fbc(crtc); | ||
421 | return; | ||
422 | } | ||
423 | |||
424 | work->crtc = crtc; | ||
425 | work->fb = crtc->primary->fb; | ||
426 | INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn); | ||
427 | |||
428 | dev_priv->fbc.fbc_work = work; | ||
429 | |||
430 | /* Delay the actual enabling to let pageflipping cease and the | ||
431 | * display settle before starting the compression. Note that | ||
432 | * this delay also serves a second purpose: it allows for a | ||
433 | * vblank to pass after disabling the FBC before we attempt | ||
434 | * to modify the control registers. | ||
435 | * | ||
436 | * A more complicated solution would involve tracking vblanks | ||
437 | * following the termination of the page-flipping sequence | ||
438 | * and indeed performing the enable as a co-routine and not | ||
439 | * waiting synchronously upon the vblank. | ||
440 | * | ||
441 | * WaFbcWaitForVBlankBeforeEnable:ilk,snb | ||
442 | */ | ||
443 | schedule_delayed_work(&work->work, msecs_to_jiffies(50)); | ||
444 | } | ||
445 | |||
446 | /** | ||
447 | * intel_fbc_disable - disable FBC | ||
448 | * @dev: the drm_device | ||
449 | * | ||
450 | * This function disables FBC. | ||
451 | */ | ||
452 | void intel_fbc_disable(struct drm_device *dev) | ||
453 | { | ||
454 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
455 | |||
456 | intel_fbc_cancel_work(dev_priv); | ||
457 | |||
458 | if (!dev_priv->display.disable_fbc) | ||
459 | return; | ||
460 | |||
461 | dev_priv->display.disable_fbc(dev); | ||
462 | dev_priv->fbc.plane = -1; | ||
463 | } | ||
464 | |||
465 | static bool set_no_fbc_reason(struct drm_i915_private *dev_priv, | ||
466 | enum no_fbc_reason reason) | ||
467 | { | ||
468 | if (dev_priv->fbc.no_fbc_reason == reason) | ||
469 | return false; | ||
470 | |||
471 | dev_priv->fbc.no_fbc_reason = reason; | ||
472 | return true; | ||
473 | } | ||
474 | |||
475 | /** | ||
476 | * intel_fbc_update - enable/disable FBC as needed | ||
477 | * @dev: the drm_device | ||
478 | * | ||
479 | * Set up the framebuffer compression hardware at mode set time. We | ||
480 | * enable it if possible: | ||
481 | * - plane A only (on pre-965) | ||
482 | * - no pixel multiply/line duplication | ||
483 | * - no alpha buffer discard | ||
484 | * - no dual wide | ||
485 | * - framebuffer <= max_hdisplay in width, max_vdisplay in height | ||
486 | * | ||
487 | * We can't assume that any compression will take place (worst case), | ||
488 | * so the compressed buffer has to be the same size as the uncompressed | ||
489 | * one. It also must reside (along with the line length buffer) in | ||
490 | * stolen memory. | ||
491 | * | ||
492 | * We need to enable/disable FBC on a global basis. | ||
493 | */ | ||
494 | void intel_fbc_update(struct drm_device *dev) | ||
495 | { | ||
496 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
497 | struct drm_crtc *crtc = NULL, *tmp_crtc; | ||
498 | struct intel_crtc *intel_crtc; | ||
499 | struct drm_framebuffer *fb; | ||
500 | struct drm_i915_gem_object *obj; | ||
501 | const struct drm_display_mode *adjusted_mode; | ||
502 | unsigned int max_width, max_height; | ||
503 | |||
504 | if (!HAS_FBC(dev)) { | ||
505 | set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED); | ||
506 | return; | ||
507 | } | ||
508 | |||
509 | if (!i915.powersave) { | ||
510 | if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) | ||
511 | DRM_DEBUG_KMS("fbc disabled per module param\n"); | ||
512 | return; | ||
513 | } | ||
514 | |||
515 | /* | ||
516 | * If FBC is already on, we just have to verify that we can | ||
517 | * keep it that way... | ||
518 | * Need to disable if: | ||
519 | * - more than one pipe is active | ||
520 | * - changing FBC params (stride, fence, mode) | ||
521 | * - new fb is too large to fit in compressed buffer | ||
522 | * - going to an unsupported config (interlace, pixel multiply, etc.) | ||
523 | */ | ||
524 | for_each_crtc(dev, tmp_crtc) { | ||
525 | if (intel_crtc_active(tmp_crtc) && | ||
526 | to_intel_crtc(tmp_crtc)->primary_enabled) { | ||
527 | if (crtc) { | ||
528 | if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES)) | ||
529 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); | ||
530 | goto out_disable; | ||
531 | } | ||
532 | crtc = tmp_crtc; | ||
533 | } | ||
534 | } | ||
535 | |||
536 | if (!crtc || crtc->primary->fb == NULL) { | ||
537 | if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT)) | ||
538 | DRM_DEBUG_KMS("no output, disabling\n"); | ||
539 | goto out_disable; | ||
540 | } | ||
541 | |||
542 | intel_crtc = to_intel_crtc(crtc); | ||
543 | fb = crtc->primary->fb; | ||
544 | obj = intel_fb_obj(fb); | ||
545 | adjusted_mode = &intel_crtc->config.adjusted_mode; | ||
546 | |||
547 | if (i915.enable_fbc < 0) { | ||
548 | if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT)) | ||
549 | DRM_DEBUG_KMS("disabled per chip default\n"); | ||
550 | goto out_disable; | ||
551 | } | ||
552 | if (!i915.enable_fbc) { | ||
553 | if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) | ||
554 | DRM_DEBUG_KMS("fbc disabled per module param\n"); | ||
555 | goto out_disable; | ||
556 | } | ||
557 | if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) || | ||
558 | (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) { | ||
559 | if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE)) | ||
560 | DRM_DEBUG_KMS("mode incompatible with compression, " | ||
561 | "disabling\n"); | ||
562 | goto out_disable; | ||
563 | } | ||
564 | |||
565 | if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) { | ||
566 | max_width = 4096; | ||
567 | max_height = 4096; | ||
568 | } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { | ||
569 | max_width = 4096; | ||
570 | max_height = 2048; | ||
571 | } else { | ||
572 | max_width = 2048; | ||
573 | max_height = 1536; | ||
574 | } | ||
575 | if (intel_crtc->config.pipe_src_w > max_width || | ||
576 | intel_crtc->config.pipe_src_h > max_height) { | ||
577 | if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE)) | ||
578 | DRM_DEBUG_KMS("mode too large for compression, disabling\n"); | ||
579 | goto out_disable; | ||
580 | } | ||
581 | if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) && | ||
582 | intel_crtc->plane != PLANE_A) { | ||
583 | if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE)) | ||
584 | DRM_DEBUG_KMS("plane not A, disabling compression\n"); | ||
585 | goto out_disable; | ||
586 | } | ||
587 | |||
588 | /* The use of a CPU fence is mandatory in order to detect writes | ||
589 | * by the CPU to the scanout and trigger updates to the FBC. | ||
590 | */ | ||
591 | if (obj->tiling_mode != I915_TILING_X || | ||
592 | obj->fence_reg == I915_FENCE_REG_NONE) { | ||
593 | if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED)) | ||
594 | DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); | ||
595 | goto out_disable; | ||
596 | } | ||
597 | if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) && | ||
598 | to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) { | ||
599 | if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE)) | ||
600 | DRM_DEBUG_KMS("Rotation unsupported, disabling\n"); | ||
601 | goto out_disable; | ||
602 | } | ||
603 | |||
604 | /* If the kernel debugger is active, always disable compression */ | ||
605 | if (in_dbg_master()) | ||
606 | goto out_disable; | ||
607 | |||
608 | if (i915_gem_stolen_setup_compression(dev, obj->base.size, | ||
609 | drm_format_plane_cpp(fb->pixel_format, 0))) { | ||
610 | if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL)) | ||
611 | DRM_DEBUG_KMS("framebuffer too large, disabling compression\n"); | ||
612 | goto out_disable; | ||
613 | } | ||
614 | |||
615 | /* If the scanout has not changed, don't modify the FBC settings. | ||
616 | * Note that we make the fundamental assumption that the fb->obj | ||
617 | * cannot be unpinned (and have its GTT offset and fence revoked) | ||
618 | * without first being decoupled from the scanout and FBC disabled. | ||
619 | */ | ||
620 | if (dev_priv->fbc.plane == intel_crtc->plane && | ||
621 | dev_priv->fbc.fb_id == fb->base.id && | ||
622 | dev_priv->fbc.y == crtc->y) | ||
623 | return; | ||
624 | |||
625 | if (intel_fbc_enabled(dev)) { | ||
626 | /* We update FBC along two paths, after changing fb/crtc | ||
627 | * configuration (modeswitching) and after page-flipping | ||
628 | * finishes. For the latter, we know that not only did | ||
629 | * we disable the FBC at the start of the page-flip | ||
630 | * sequence, but also more than one vblank has passed. | ||
631 | * | ||
632 | * For the former case of modeswitching, it is possible | ||
633 | * to switch between two FBC valid configurations | ||
634 | * instantaneously so we do need to disable the FBC | ||
635 | * before we can modify its control registers. We also | ||
636 | * have to wait for the next vblank for that to take | ||
637 | * effect. However, since we delay enabling FBC we can | ||
638 | * assume that a vblank has passed since disabling and | ||
639 | * that we can safely alter the registers in the deferred | ||
640 | * callback. | ||
641 | * | ||
642 | * In the scenario that we go from a valid to invalid | ||
643 | * and then back to valid FBC configuration we have | ||
644 | * no strict enforcement that a vblank occurred since | ||
645 | * disabling the FBC. However, along all current pipe | ||
646 | * disabling paths we do need to wait for a vblank at | ||
647 | * some point. And we wait before enabling FBC anyway. | ||
648 | */ | ||
649 | DRM_DEBUG_KMS("disabling active FBC for update\n"); | ||
650 | intel_fbc_disable(dev); | ||
651 | } | ||
652 | |||
653 | intel_fbc_enable(crtc); | ||
654 | dev_priv->fbc.no_fbc_reason = FBC_OK; | ||
655 | return; | ||
656 | |||
657 | out_disable: | ||
658 | /* Multiple disables should be harmless */ | ||
659 | if (intel_fbc_enabled(dev)) { | ||
660 | DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); | ||
661 | intel_fbc_disable(dev); | ||
662 | } | ||
663 | i915_gem_stolen_cleanup_compression(dev); | ||
664 | } | ||
665 | |||
666 | /** | ||
667 | * intel_fbc_init - Initialize FBC | ||
668 | * @dev_priv: the i915 device | ||
669 | * | ||
670 | * This function might be called during the PM init process. | ||
671 | */ | ||
672 | void intel_fbc_init(struct drm_i915_private *dev_priv) | ||
673 | { | ||
674 | if (!HAS_FBC(dev_priv)) { | ||
675 | dev_priv->fbc.enabled = false; | ||
676 | return; | ||
677 | } | ||
678 | |||
679 | if (INTEL_INFO(dev_priv)->gen >= 7) { | ||
680 | dev_priv->display.fbc_enabled = ilk_fbc_enabled; | ||
681 | dev_priv->display.enable_fbc = gen7_fbc_enable; | ||
682 | dev_priv->display.disable_fbc = ilk_fbc_disable; | ||
683 | } else if (INTEL_INFO(dev_priv)->gen >= 5) { | ||
684 | dev_priv->display.fbc_enabled = ilk_fbc_enabled; | ||
685 | dev_priv->display.enable_fbc = ilk_fbc_enable; | ||
686 | dev_priv->display.disable_fbc = ilk_fbc_disable; | ||
687 | } else if (IS_GM45(dev_priv)) { | ||
688 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; | ||
689 | dev_priv->display.enable_fbc = g4x_fbc_enable; | ||
690 | dev_priv->display.disable_fbc = g4x_fbc_disable; | ||
691 | } else { | ||
692 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; | ||
693 | dev_priv->display.enable_fbc = i8xx_fbc_enable; | ||
694 | dev_priv->display.disable_fbc = i8xx_fbc_disable; | ||
695 | |||
696 | /* This value was pulled out of someone's hat */ | ||
697 | I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT); | ||
698 | } | ||
699 | |||
700 | dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev); | ||
701 | } | ||
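After intel_fbc_init() the rest of the driver only goes through the function pointers; a minimal sketch of how a caller reaches the platform hook, which is essentially what intel_fbc_enable() above already does:

	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.enable_fbc)
		dev_priv->display.enable_fbc(crtc);	/* i8xx/g4x/ilk/gen7 variant */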
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index e588376227ea..7670a0f0f620 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -212,8 +212,7 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring, | |||
212 | * @enable_execlists: value of i915.enable_execlists module parameter. | 212 | * @enable_execlists: value of i915.enable_execlists module parameter. |
213 | * | 213 | * |
214 | * Only certain platforms support Execlists (the prerequisites being | 214 | * Only certain platforms support Execlists (the prerequisites being |
215 | * support for Logical Ring Contexts and Aliasing PPGTT or better), | 215 | * support for Logical Ring Contexts and Aliasing PPGTT or better). |
216 | * and only when enabled via module parameter. | ||
217 | * | 216 | * |
218 | * Return: 1 if Execlists is supported and has to be enabled. | 217 | * Return: 1 if Execlists is supported and has to be enabled. |
219 | */ | 218 | */ |
@@ -474,13 +473,13 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring, | |||
474 | } | 473 | } |
475 | 474 | ||
476 | /** | 475 | /** |
477 | * intel_execlists_handle_ctx_events() - handle Context Switch interrupts | 476 | * intel_lrc_irq_handler() - handle Context Switch interrupts |
478 | * @ring: Engine Command Streamer to handle. | 477 | * @ring: Engine Command Streamer to handle. |
479 | * | 478 | * |
480 | * Check the unread Context Status Buffers and manage the submission of new | 479 | * Check the unread Context Status Buffers and manage the submission of new |
481 | * contexts to the ELSP accordingly. | 480 | * contexts to the ELSP accordingly. |
482 | */ | 481 | */ |
483 | void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring) | 482 | void intel_lrc_irq_handler(struct intel_engine_cs *ring) |
484 | { | 483 | { |
485 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | 484 | struct drm_i915_private *dev_priv = ring->dev->dev_private; |
486 | u32 status_pointer; | 485 | u32 status_pointer; |
@@ -876,40 +875,48 @@ void intel_lr_context_unpin(struct intel_engine_cs *ring, | |||
876 | } | 875 | } |
877 | } | 876 | } |
878 | 877 | ||
879 | static int logical_ring_alloc_seqno(struct intel_engine_cs *ring, | 878 | static int logical_ring_alloc_request(struct intel_engine_cs *ring, |
880 | struct intel_context *ctx) | 879 | struct intel_context *ctx) |
881 | { | 880 | { |
881 | struct drm_i915_gem_request *request; | ||
882 | struct drm_i915_private *dev_private = ring->dev->dev_private; | ||
882 | int ret; | 883 | int ret; |
883 | 884 | ||
884 | if (ring->outstanding_lazy_seqno) | 885 | if (ring->outstanding_lazy_request) |
885 | return 0; | 886 | return 0; |
886 | 887 | ||
887 | if (ring->preallocated_lazy_request == NULL) { | 888 | request = kzalloc(sizeof(*request), GFP_KERNEL); |
888 | struct drm_i915_gem_request *request; | 889 | if (request == NULL) |
889 | 890 | return -ENOMEM; | |
890 | request = kmalloc(sizeof(*request), GFP_KERNEL); | ||
891 | if (request == NULL) | ||
892 | return -ENOMEM; | ||
893 | 891 | ||
894 | if (ctx != ring->default_context) { | 892 | if (ctx != ring->default_context) { |
895 | ret = intel_lr_context_pin(ring, ctx); | 893 | ret = intel_lr_context_pin(ring, ctx); |
896 | if (ret) { | 894 | if (ret) { |
897 | kfree(request); | 895 | kfree(request); |
898 | return ret; | 896 | return ret; |
899 | } | ||
900 | } | 897 | } |
898 | } | ||
901 | 899 | ||
902 | /* Hold a reference to the context this request belongs to | 900 | kref_init(&request->ref); |
903 | * (we will need it when the time comes to emit/retire the | 901 | request->ring = ring; |
904 | * request). | 902 | request->uniq = dev_private->request_uniq++; |
905 | */ | ||
906 | request->ctx = ctx; | ||
907 | i915_gem_context_reference(request->ctx); | ||
908 | 903 | ||
909 | ring->preallocated_lazy_request = request; | 904 | ret = i915_gem_get_seqno(ring->dev, &request->seqno); |
905 | if (ret) { | ||
906 | intel_lr_context_unpin(ring, ctx); | ||
907 | kfree(request); | ||
908 | return ret; | ||
910 | } | 909 | } |
911 | 910 | ||
912 | return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno); | 911 | /* Hold a reference to the context this request belongs to |
912 | * (we will need it when the time comes to emit/retire the | ||
913 | * request). | ||
914 | */ | ||
915 | request->ctx = ctx; | ||
916 | i915_gem_context_reference(request->ctx); | ||
917 | |||
918 | ring->outstanding_lazy_request = request; | ||
919 | return 0; | ||
913 | } | 920 | } |
914 | 921 | ||
915 | static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf, | 922 | static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf, |
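The kref_init() above makes requests reference counted. A minimal sketch of the accompanying reference helpers this series relies on (the canonical versions live in i915_drv.h; locking asserts omitted):

static inline void
i915_gem_request_reference(struct drm_i915_gem_request *req)
{
	kref_get(&req->ref);
}

static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
	/* i915_gem_request_free() is assumed to be the kref release
	 * callback that unpins the context and frees the request. */
	kref_put(&req->ref, i915_gem_request_free);
}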
@@ -917,39 +924,38 @@ static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf, | |||
917 | { | 924 | { |
918 | struct intel_engine_cs *ring = ringbuf->ring; | 925 | struct intel_engine_cs *ring = ringbuf->ring; |
919 | struct drm_i915_gem_request *request; | 926 | struct drm_i915_gem_request *request; |
920 | u32 seqno = 0; | ||
921 | int ret; | 927 | int ret; |
922 | 928 | ||
923 | if (ringbuf->last_retired_head != -1) { | 929 | if (intel_ring_space(ringbuf) >= bytes) |
924 | ringbuf->head = ringbuf->last_retired_head; | 930 | return 0; |
925 | ringbuf->last_retired_head = -1; | ||
926 | |||
927 | ringbuf->space = intel_ring_space(ringbuf); | ||
928 | if (ringbuf->space >= bytes) | ||
929 | return 0; | ||
930 | } | ||
931 | 931 | ||
932 | list_for_each_entry(request, &ring->request_list, list) { | 932 | list_for_each_entry(request, &ring->request_list, list) { |
933 | /* | ||
934 | * The request queue is per-engine, so can contain requests | ||
935 | * from multiple ringbuffers. Here, we must ignore any that | ||
936 | * aren't from the ringbuffer we're considering. | ||
937 | */ | ||
938 | struct intel_context *ctx = request->ctx; | ||
939 | if (ctx->engine[ring->id].ringbuf != ringbuf) | ||
940 | continue; | ||
941 | |||
942 | /* Would completion of this request free enough space? */ | ||
933 | if (__intel_ring_space(request->tail, ringbuf->tail, | 943 | if (__intel_ring_space(request->tail, ringbuf->tail, |
934 | ringbuf->size) >= bytes) { | 944 | ringbuf->size) >= bytes) { |
935 | seqno = request->seqno; | ||
936 | break; | 945 | break; |
937 | } | 946 | } |
938 | } | 947 | } |
939 | 948 | ||
940 | if (seqno == 0) | 949 | if (&request->list == &ring->request_list) |
941 | return -ENOSPC; | 950 | return -ENOSPC; |
942 | 951 | ||
943 | ret = i915_wait_seqno(ring, seqno); | 952 | ret = i915_wait_request(request); |
944 | if (ret) | 953 | if (ret) |
945 | return ret; | 954 | return ret; |
946 | 955 | ||
947 | i915_gem_retire_requests_ring(ring); | 956 | i915_gem_retire_requests_ring(ring); |
948 | ringbuf->head = ringbuf->last_retired_head; | ||
949 | ringbuf->last_retired_head = -1; | ||
950 | 957 | ||
951 | ringbuf->space = intel_ring_space(ringbuf); | 958 | return intel_ring_space(ringbuf) >= bytes ? 0 : -ENOSPC; |
952 | return 0; | ||
953 | } | 959 | } |
954 | 960 | ||
955 | static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf, | 961 | static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf, |
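With the seqno local gone, the rewritten loop detects "no suitable request found" via the list_for_each_entry() exit condition: if the walk completes without a break, the cursor aliases the list head. The idiom in isolation (the predicate is a hypothetical stand-in for the ring-space test):

struct drm_i915_gem_request *req;

list_for_each_entry(req, &ring->request_list, list) {
	if (request_frees_enough_space(req))	/* hypothetical */
		break;
}

/* Fell off the end: req->list now points at the head itself. */
if (&req->list == &ring->request_list)
	return -ENOSPC;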
@@ -975,13 +981,10 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf, | |||
975 | * case by choosing an insanely large timeout. */ | 981 | * case by choosing an insanely large timeout. */ |
976 | end = jiffies + 60 * HZ; | 982 | end = jiffies + 60 * HZ; |
977 | 983 | ||
984 | ret = 0; | ||
978 | do { | 985 | do { |
979 | ringbuf->head = I915_READ_HEAD(ring); | 986 | if (intel_ring_space(ringbuf) >= bytes) |
980 | ringbuf->space = intel_ring_space(ringbuf); | ||
981 | if (ringbuf->space >= bytes) { | ||
982 | ret = 0; | ||
983 | break; | 987 | break; |
984 | } | ||
985 | 988 | ||
986 | msleep(1); | 989 | msleep(1); |
987 | 990 | ||
@@ -1022,7 +1025,7 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf) | |||
1022 | iowrite32(MI_NOOP, virt++); | 1025 | iowrite32(MI_NOOP, virt++); |
1023 | 1026 | ||
1024 | ringbuf->tail = 0; | 1027 | ringbuf->tail = 0; |
1025 | ringbuf->space = intel_ring_space(ringbuf); | 1028 | intel_ring_update_space(ringbuf); |
1026 | 1029 | ||
1027 | return 0; | 1030 | return 0; |
1028 | } | 1031 | } |
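intel_ring_update_space() replaces several open-coded "ringbuf->space = intel_ring_space(ringbuf)" sites. The arithmetic underneath is the usual circular-buffer computation; a sketch assuming the I915_RING_FREE_SPACE margin from intel_ringbuffer.h:

/* Free bytes between tail and head, modulo ring size, keeping
 * I915_RING_FREE_SPACE in reserve so the tail can never catch the
 * head. */
static int ring_space_sketch(int head, int tail, int size)
{
	int space = head - (tail + I915_RING_FREE_SPACE);

	if (space < 0)
		space += size;
	return space;
}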
@@ -1076,7 +1079,7 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords) | |||
1076 | return ret; | 1079 | return ret; |
1077 | 1080 | ||
1078 | /* Preallocate the olr before touching the ring */ | 1081 | /* Preallocate the olr before touching the ring */ |
1079 | ret = logical_ring_alloc_seqno(ring, ringbuf->FIXME_lrc_ctx); | 1082 | ret = logical_ring_alloc_request(ring, ringbuf->FIXME_lrc_ctx); |
1080 | if (ret) | 1083 | if (ret) |
1081 | return ret; | 1084 | return ret; |
1082 | 1085 | ||
@@ -1093,7 +1096,7 @@ static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring, | |||
1093 | struct drm_i915_private *dev_priv = dev->dev_private; | 1096 | struct drm_i915_private *dev_priv = dev->dev_private; |
1094 | struct i915_workarounds *w = &dev_priv->workarounds; | 1097 | struct i915_workarounds *w = &dev_priv->workarounds; |
1095 | 1098 | ||
1096 | if (WARN_ON(w->count == 0)) | 1099 | if (WARN_ON_ONCE(w->count == 0)) |
1097 | return 0; | 1100 | return 0; |
1098 | 1101 | ||
1099 | ring->gpu_caches_dirty = true; | 1102 | ring->gpu_caches_dirty = true; |
@@ -1159,10 +1162,6 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring) | |||
1159 | */ | 1162 | */ |
1160 | I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); | 1163 | I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); |
1161 | 1164 | ||
1162 | ret = intel_init_pipe_control(ring); | ||
1163 | if (ret) | ||
1164 | return ret; | ||
1165 | |||
1166 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); | 1165 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); |
1167 | 1166 | ||
1168 | return init_workarounds_ring(ring); | 1167 | return init_workarounds_ring(ring); |
@@ -1321,7 +1320,7 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf) | |||
1321 | if (ret) | 1320 | if (ret) |
1322 | return ret; | 1321 | return ret; |
1323 | 1322 | ||
1324 | cmd = MI_STORE_DWORD_IMM_GEN8; | 1323 | cmd = MI_STORE_DWORD_IMM_GEN4; |
1325 | cmd |= MI_GLOBAL_GTT; | 1324 | cmd |= MI_GLOBAL_GTT; |
1326 | 1325 | ||
1327 | intel_logical_ring_emit(ringbuf, cmd); | 1326 | intel_logical_ring_emit(ringbuf, cmd); |
@@ -1329,7 +1328,8 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf) | |||
1329 | (ring->status_page.gfx_addr + | 1328 | (ring->status_page.gfx_addr + |
1330 | (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT))); | 1329 | (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT))); |
1331 | intel_logical_ring_emit(ringbuf, 0); | 1330 | intel_logical_ring_emit(ringbuf, 0); |
1332 | intel_logical_ring_emit(ringbuf, ring->outstanding_lazy_seqno); | 1331 | intel_logical_ring_emit(ringbuf, |
1332 | i915_gem_request_get_seqno(ring->outstanding_lazy_request)); | ||
1333 | intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); | 1333 | intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); |
1334 | intel_logical_ring_emit(ringbuf, MI_NOOP); | 1334 | intel_logical_ring_emit(ringbuf, MI_NOOP); |
1335 | intel_logical_ring_advance_and_submit(ringbuf); | 1335 | intel_logical_ring_advance_and_submit(ringbuf); |
@@ -1337,6 +1337,18 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf) | |||
1337 | return 0; | 1337 | return 0; |
1338 | } | 1338 | } |
1339 | 1339 | ||
1340 | static int gen8_init_rcs_context(struct intel_engine_cs *ring, | ||
1341 | struct intel_context *ctx) | ||
1342 | { | ||
1343 | int ret; | ||
1344 | |||
1345 | ret = intel_logical_ring_workarounds_emit(ring, ctx); | ||
1346 | if (ret) | ||
1347 | return ret; | ||
1348 | |||
1349 | return intel_lr_context_render_state_init(ring, ctx); | ||
1350 | } | ||
1351 | |||
1340 | /** | 1352 | /** |
1341 | * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer | 1353 | * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer |
1342 | * | 1354 | * |
@@ -1354,8 +1366,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring) | |||
1354 | 1366 | ||
1355 | intel_logical_ring_stop(ring); | 1367 | intel_logical_ring_stop(ring); |
1356 | WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); | 1368 | WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); |
1357 | ring->preallocated_lazy_request = NULL; | 1369 | i915_gem_request_assign(&ring->outstanding_lazy_request, NULL); |
1358 | ring->outstanding_lazy_seqno = 0; | ||
1359 | 1370 | ||
1360 | if (ring->cleanup) | 1371 | if (ring->cleanup) |
1361 | ring->cleanup(ring); | 1372 | ring->cleanup(ring); |
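i915_gem_request_assign(), used here to drop the outstanding lazy request and again in the overlay conversion further down, is the take-new/release-old pointer swap. Its expected shape (a sketch; the canonical version lives in i915_drv.h):

static inline void
i915_gem_request_assign(struct drm_i915_gem_request **pdst,
			struct drm_i915_gem_request *src)
{
	if (src)
		i915_gem_request_reference(src);
	if (*pdst)
		i915_gem_request_unreference(*pdst);
	*pdst = src;
}

Assigning NULL therefore just releases whatever was held, which is exactly what the cleanup path above wants.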
@@ -1389,12 +1400,6 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin | |||
1389 | if (ret) | 1400 | if (ret) |
1390 | return ret; | 1401 | return ret; |
1391 | 1402 | ||
1392 | if (ring->init) { | ||
1393 | ret = ring->init(ring); | ||
1394 | if (ret) | ||
1395 | return ret; | ||
1396 | } | ||
1397 | |||
1398 | ret = intel_lr_context_deferred_create(ring->default_context, ring); | 1403 | ret = intel_lr_context_deferred_create(ring->default_context, ring); |
1399 | 1404 | ||
1400 | return ret; | 1405 | return ret; |
@@ -1404,6 +1409,7 @@ static int logical_render_ring_init(struct drm_device *dev) | |||
1404 | { | 1409 | { |
1405 | struct drm_i915_private *dev_priv = dev->dev_private; | 1410 | struct drm_i915_private *dev_priv = dev->dev_private; |
1406 | struct intel_engine_cs *ring = &dev_priv->ring[RCS]; | 1411 | struct intel_engine_cs *ring = &dev_priv->ring[RCS]; |
1412 | int ret; | ||
1407 | 1413 | ||
1408 | ring->name = "render ring"; | 1414 | ring->name = "render ring"; |
1409 | ring->id = RCS; | 1415 | ring->id = RCS; |
@@ -1415,8 +1421,8 @@ static int logical_render_ring_init(struct drm_device *dev) | |||
1415 | if (HAS_L3_DPF(dev)) | 1421 | if (HAS_L3_DPF(dev)) |
1416 | ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; | 1422 | ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; |
1417 | 1423 | ||
1418 | ring->init = gen8_init_render_ring; | 1424 | ring->init_hw = gen8_init_render_ring; |
1419 | ring->init_context = intel_logical_ring_workarounds_emit; | 1425 | ring->init_context = gen8_init_rcs_context; |
1420 | ring->cleanup = intel_fini_pipe_control; | 1426 | ring->cleanup = intel_fini_pipe_control; |
1421 | ring->get_seqno = gen8_get_seqno; | 1427 | ring->get_seqno = gen8_get_seqno; |
1422 | ring->set_seqno = gen8_set_seqno; | 1428 | ring->set_seqno = gen8_set_seqno; |
@@ -1426,7 +1432,12 @@ static int logical_render_ring_init(struct drm_device *dev) | |||
1426 | ring->irq_put = gen8_logical_ring_put_irq; | 1432 | ring->irq_put = gen8_logical_ring_put_irq; |
1427 | ring->emit_bb_start = gen8_emit_bb_start; | 1433 | ring->emit_bb_start = gen8_emit_bb_start; |
1428 | 1434 | ||
1429 | return logical_ring_init(dev, ring); | 1435 | ring->dev = dev; |
1436 | ret = logical_ring_init(dev, ring); | ||
1437 | if (ret) | ||
1438 | return ret; | ||
1439 | |||
1440 | return intel_init_pipe_control(ring); | ||
1430 | } | 1441 | } |
1431 | 1442 | ||
1432 | static int logical_bsd_ring_init(struct drm_device *dev) | 1443 | static int logical_bsd_ring_init(struct drm_device *dev) |
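The .init to .init_hw rename across all five engines separates one-time software construction (logical_ring_init(), context creation) from hardware programming that must be redone after reset or resume. The caller is outside the hunks shown; presumably the GEM hardware-init path re-runs the hook along these lines:

/* Sketch, assuming the for_each_ring() iterator: replay hardware
 * setup on every engine without disturbing software state. */
static int reinit_rings_sketch(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i, ret;

	for_each_ring(ring, dev_priv, i) {
		ret = ring->init_hw(ring);
		if (ret)
			return ret;
	}
	return 0;
}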
@@ -1442,7 +1453,7 @@ static int logical_bsd_ring_init(struct drm_device *dev) | |||
1442 | ring->irq_keep_mask = | 1453 | ring->irq_keep_mask = |
1443 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; | 1454 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; |
1444 | 1455 | ||
1445 | ring->init = gen8_init_common_ring; | 1456 | ring->init_hw = gen8_init_common_ring; |
1446 | ring->get_seqno = gen8_get_seqno; | 1457 | ring->get_seqno = gen8_get_seqno; |
1447 | ring->set_seqno = gen8_set_seqno; | 1458 | ring->set_seqno = gen8_set_seqno; |
1448 | ring->emit_request = gen8_emit_request; | 1459 | ring->emit_request = gen8_emit_request; |
@@ -1467,7 +1478,7 @@ static int logical_bsd2_ring_init(struct drm_device *dev) | |||
1467 | ring->irq_keep_mask = | 1478 | ring->irq_keep_mask = |
1468 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT; | 1479 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT; |
1469 | 1480 | ||
1470 | ring->init = gen8_init_common_ring; | 1481 | ring->init_hw = gen8_init_common_ring; |
1471 | ring->get_seqno = gen8_get_seqno; | 1482 | ring->get_seqno = gen8_get_seqno; |
1472 | ring->set_seqno = gen8_set_seqno; | 1483 | ring->set_seqno = gen8_set_seqno; |
1473 | ring->emit_request = gen8_emit_request; | 1484 | ring->emit_request = gen8_emit_request; |
@@ -1492,7 +1503,7 @@ static int logical_blt_ring_init(struct drm_device *dev) | |||
1492 | ring->irq_keep_mask = | 1503 | ring->irq_keep_mask = |
1493 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT; | 1504 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT; |
1494 | 1505 | ||
1495 | ring->init = gen8_init_common_ring; | 1506 | ring->init_hw = gen8_init_common_ring; |
1496 | ring->get_seqno = gen8_get_seqno; | 1507 | ring->get_seqno = gen8_get_seqno; |
1497 | ring->set_seqno = gen8_set_seqno; | 1508 | ring->set_seqno = gen8_set_seqno; |
1498 | ring->emit_request = gen8_emit_request; | 1509 | ring->emit_request = gen8_emit_request; |
@@ -1517,7 +1528,7 @@ static int logical_vebox_ring_init(struct drm_device *dev) | |||
1517 | ring->irq_keep_mask = | 1528 | ring->irq_keep_mask = |
1518 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT; | 1529 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT; |
1519 | 1530 | ||
1520 | ring->init = gen8_init_common_ring; | 1531 | ring->init_hw = gen8_init_common_ring; |
1521 | ring->get_seqno = gen8_get_seqno; | 1532 | ring->get_seqno = gen8_get_seqno; |
1522 | ring->set_seqno = gen8_set_seqno; | 1533 | ring->set_seqno = gen8_set_seqno; |
1523 | ring->emit_request = gen8_emit_request; | 1534 | ring->emit_request = gen8_emit_request; |
@@ -1616,7 +1627,7 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring, | |||
1616 | 1627 | ||
1617 | i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring); | 1628 | i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring); |
1618 | 1629 | ||
1619 | ret = __i915_add_request(ring, file, so.obj, NULL); | 1630 | ret = __i915_add_request(ring, file, so.obj); |
1620 | /* intel_logical_ring_add_request moves object to inactive if it | 1631 | /* intel_logical_ring_add_request moves object to inactive if it |
1621 | * fails */ | 1632 | * fails */ |
1622 | out: | 1633 | out: |
@@ -1835,8 +1846,7 @@ int intel_lr_context_deferred_create(struct intel_context *ctx, | |||
1835 | int ret; | 1846 | int ret; |
1836 | 1847 | ||
1837 | WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL); | 1848 | WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL); |
1838 | if (ctx->engine[ring->id].state) | 1849 | WARN_ON(ctx->engine[ring->id].state); |
1839 | return 0; | ||
1840 | 1850 | ||
1841 | context_size = round_up(get_lr_context_size(ring), 4096); | 1851 | context_size = round_up(get_lr_context_size(ring), 4096); |
1842 | 1852 | ||
@@ -1872,8 +1882,8 @@ int intel_lr_context_deferred_create(struct intel_context *ctx, | |||
1872 | ringbuf->effective_size = ringbuf->size; | 1882 | ringbuf->effective_size = ringbuf->size; |
1873 | ringbuf->head = 0; | 1883 | ringbuf->head = 0; |
1874 | ringbuf->tail = 0; | 1884 | ringbuf->tail = 0; |
1875 | ringbuf->space = ringbuf->size; | ||
1876 | ringbuf->last_retired_head = -1; | 1885 | ringbuf->last_retired_head = -1; |
1886 | intel_ring_update_space(ringbuf); | ||
1877 | 1887 | ||
1878 | if (ringbuf->obj == NULL) { | 1888 | if (ringbuf->obj == NULL) { |
1879 | ret = intel_alloc_ringbuffer_obj(dev, ringbuf); | 1889 | ret = intel_alloc_ringbuffer_obj(dev, ringbuf); |
@@ -1907,21 +1917,17 @@ int intel_lr_context_deferred_create(struct intel_context *ctx, | |||
1907 | 1917 | ||
1908 | if (ctx == ring->default_context) | 1918 | if (ctx == ring->default_context) |
1909 | lrc_setup_hardware_status_page(ring, ctx_obj); | 1919 | lrc_setup_hardware_status_page(ring, ctx_obj); |
1910 | 1920 | else if (ring->id == RCS && !ctx->rcs_initialized) { | |
1911 | if (ring->id == RCS && !ctx->rcs_initialized) { | ||
1912 | if (ring->init_context) { | 1921 | if (ring->init_context) { |
1913 | ret = ring->init_context(ring, ctx); | 1922 | ret = ring->init_context(ring, ctx); |
1914 | if (ret) | 1923 | if (ret) { |
1915 | DRM_ERROR("ring init context: %d\n", ret); | 1924 | DRM_ERROR("ring init context: %d\n", ret); |
1925 | ctx->engine[ring->id].ringbuf = NULL; | ||
1926 | ctx->engine[ring->id].state = NULL; | ||
1927 | goto error; | ||
1928 | } | ||
1916 | } | 1929 | } |
1917 | 1930 | ||
1918 | ret = intel_lr_context_render_state_init(ring, ctx); | ||
1919 | if (ret) { | ||
1920 | DRM_ERROR("Init render state failed: %d\n", ret); | ||
1921 | ctx->engine[ring->id].ringbuf = NULL; | ||
1922 | ctx->engine[ring->id].state = NULL; | ||
1923 | goto error; | ||
1924 | } | ||
1925 | ctx->rcs_initialized = true; | 1931 | ctx->rcs_initialized = true; |
1926 | } | 1932 | } |
1927 | 1933 | ||
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 14b216b9be7f..960fcbd2c98a 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h | |||
@@ -112,7 +112,7 @@ struct intel_ctx_submit_request { | |||
112 | int elsp_submitted; | 112 | int elsp_submitted; |
113 | }; | 113 | }; |
114 | 114 | ||
115 | void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring); | 115 | void intel_lrc_irq_handler(struct intel_engine_cs *ring); |
116 | void intel_execlists_retire_requests(struct intel_engine_cs *ring); | 116 | void intel_execlists_retire_requests(struct intel_engine_cs *ring); |
117 | 117 | ||
118 | #endif /* _INTEL_LRC_H_ */ | 118 | #endif /* _INTEL_LRC_H_ */ |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index dc2f4f26c961..973c9de3b87d 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -182,7 +182,7 @@ struct intel_overlay { | |||
182 | u32 flip_addr; | 182 | u32 flip_addr; |
183 | struct drm_i915_gem_object *reg_bo; | 183 | struct drm_i915_gem_object *reg_bo; |
184 | /* flip handling */ | 184 | /* flip handling */ |
185 | uint32_t last_flip_req; | 185 | struct drm_i915_gem_request *last_flip_req; |
186 | void (*flip_tail)(struct intel_overlay *); | 186 | void (*flip_tail)(struct intel_overlay *); |
187 | }; | 187 | }; |
188 | 188 | ||
@@ -217,17 +217,19 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, | |||
217 | int ret; | 217 | int ret; |
218 | 218 | ||
219 | BUG_ON(overlay->last_flip_req); | 219 | BUG_ON(overlay->last_flip_req); |
220 | ret = i915_add_request(ring, &overlay->last_flip_req); | 220 | i915_gem_request_assign(&overlay->last_flip_req, |
221 | ring->outstanding_lazy_request); | ||
222 | ret = i915_add_request(ring); | ||
221 | if (ret) | 223 | if (ret) |
222 | return ret; | 224 | return ret; |
223 | 225 | ||
224 | overlay->flip_tail = tail; | 226 | overlay->flip_tail = tail; |
225 | ret = i915_wait_seqno(ring, overlay->last_flip_req); | 227 | ret = i915_wait_request(overlay->last_flip_req); |
226 | if (ret) | 228 | if (ret) |
227 | return ret; | 229 | return ret; |
228 | i915_gem_retire_requests(dev); | 230 | i915_gem_retire_requests(dev); |
229 | 231 | ||
230 | overlay->last_flip_req = 0; | 232 | i915_gem_request_assign(&overlay->last_flip_req, NULL); |
231 | return 0; | 233 | return 0; |
232 | } | 234 | } |
233 | 235 | ||
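Taken together, this hunk is the template for converting any seqno-tracking consumer to the request API (all names per this series):

/* 1. Pin a reference to the request that is about to be emitted. */
i915_gem_request_assign(&overlay->last_flip_req,
			ring->outstanding_lazy_request);
ret = i915_add_request(ring);
if (ret)
	return ret;

/* 2. Wait on the request object rather than a raw seqno. */
ret = i915_wait_request(overlay->last_flip_req);
if (ret)
	return ret;

/* 3. Release the reference once the wait completes. */
i915_gem_request_assign(&overlay->last_flip_req, NULL);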
@@ -286,7 +288,10 @@ static int intel_overlay_continue(struct intel_overlay *overlay, | |||
286 | intel_ring_emit(ring, flip_addr); | 288 | intel_ring_emit(ring, flip_addr); |
287 | intel_ring_advance(ring); | 289 | intel_ring_advance(ring); |
288 | 290 | ||
289 | return i915_add_request(ring, &overlay->last_flip_req); | 291 | WARN_ON(overlay->last_flip_req); |
292 | i915_gem_request_assign(&overlay->last_flip_req, | ||
293 | ring->outstanding_lazy_request); | ||
294 | return i915_add_request(ring); | ||
290 | } | 295 | } |
291 | 296 | ||
292 | static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) | 297 | static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) |
@@ -361,23 +366,20 @@ static int intel_overlay_off(struct intel_overlay *overlay) | |||
361 | * We have to be careful not to repeat work forever and make forward progress. */ | 366 | * We have to be careful not to repeat work forever and make forward progress. */

362 | static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) | 367 | static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay) |
363 | { | 368 | { |
364 | struct drm_device *dev = overlay->dev; | ||
365 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
366 | struct intel_engine_cs *ring = &dev_priv->ring[RCS]; | ||
367 | int ret; | 369 | int ret; |
368 | 370 | ||
369 | if (overlay->last_flip_req == 0) | 371 | if (overlay->last_flip_req == NULL) |
370 | return 0; | 372 | return 0; |
371 | 373 | ||
372 | ret = i915_wait_seqno(ring, overlay->last_flip_req); | 374 | ret = i915_wait_request(overlay->last_flip_req); |
373 | if (ret) | 375 | if (ret) |
374 | return ret; | 376 | return ret; |
375 | i915_gem_retire_requests(dev); | 377 | i915_gem_retire_requests(overlay->dev); |
376 | 378 | ||
377 | if (overlay->flip_tail) | 379 | if (overlay->flip_tail) |
378 | overlay->flip_tail(overlay); | 380 | overlay->flip_tail(overlay); |
379 | 381 | ||
380 | overlay->last_flip_req = 0; | 382 | i915_gem_request_assign(&overlay->last_flip_req, NULL); |
381 | return 0; | 383 | return 0; |
382 | } | 384 | } |
383 | 385 | ||
@@ -392,6 +394,8 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) | |||
392 | struct intel_engine_cs *ring = &dev_priv->ring[RCS]; | 394 | struct intel_engine_cs *ring = &dev_priv->ring[RCS]; |
393 | int ret; | 395 | int ret; |
394 | 396 | ||
397 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
398 | |||
395 | /* Only wait if there is actually an old frame to release to | 399 | /* Only wait if there is actually an old frame to release to |
396 | * guarantee forward progress. | 400 | * guarantee forward progress. |
397 | */ | 401 | */ |
@@ -422,6 +426,22 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) | |||
422 | return 0; | 426 | return 0; |
423 | } | 427 | } |
424 | 428 | ||
429 | void intel_overlay_reset(struct drm_i915_private *dev_priv) | ||
430 | { | ||
431 | struct intel_overlay *overlay = dev_priv->overlay; | ||
432 | |||
433 | if (!overlay) | ||
434 | return; | ||
435 | |||
436 | intel_overlay_release_old_vid(overlay); | ||
437 | |||
438 | overlay->last_flip_req = NULL; | ||
439 | overlay->old_xscale = 0; | ||
440 | overlay->old_yscale = 0; | ||
441 | overlay->crtc = NULL; | ||
442 | overlay->active = false; | ||
443 | } | ||
444 | |||
425 | struct put_image_params { | 445 | struct put_image_params { |
426 | int format; | 446 | int format; |
427 | short dst_x; | 447 | short dst_x; |
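intel_overlay_reset() has no caller within this diff; a plausible, purely hypothetical call site is the GPU reset path, after pending requests have been scrapped:

/* Hypothetical caller sketch; the real call site is outside the
 * hunks shown. */
static void after_gpu_reset_sketch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Requests from before the reset no longer exist, so the
	 * overlay must not keep a stale last_flip_req around. */
	intel_overlay_reset(dev_priv);
}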
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 964b28e3c630..a3ebaa873107 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -52,17 +52,6 @@ | |||
52 | #define INTEL_RC6p_ENABLE (1<<1) | 52 | #define INTEL_RC6p_ENABLE (1<<1) |
53 | #define INTEL_RC6pp_ENABLE (1<<2) | 53 | #define INTEL_RC6pp_ENABLE (1<<2) |
54 | 54 | ||
55 | /* FBC, or Frame Buffer Compression, is a technique employed to compress the | ||
56 | * framebuffer contents in-memory, aiming at reducing the required bandwidth | ||
57 | * during in-memory transfers and, therefore, reduce the power packet. | ||
58 | * | ||
59 | * The benefits of FBC are mostly visible with solid backgrounds and | ||
60 | * variation-less patterns. | ||
61 | * | ||
62 | * FBC-related functionality can be enabled by the means of the | ||
63 | * i915.i915_enable_fbc parameter | ||
64 | */ | ||
65 | |||
66 | static void gen9_init_clock_gating(struct drm_device *dev) | 55 | static void gen9_init_clock_gating(struct drm_device *dev) |
67 | { | 56 | { |
68 | struct drm_i915_private *dev_priv = dev->dev_private; | 57 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -87,613 +76,6 @@ static void gen9_init_clock_gating(struct drm_device *dev) | |||
87 | _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE)); | 76 | _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE)); |
88 | } | 77 | } |
89 | 78 | ||
90 | static void i8xx_disable_fbc(struct drm_device *dev) | ||
91 | { | ||
92 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
93 | u32 fbc_ctl; | ||
94 | |||
95 | dev_priv->fbc.enabled = false; | ||
96 | |||
97 | /* Disable compression */ | ||
98 | fbc_ctl = I915_READ(FBC_CONTROL); | ||
99 | if ((fbc_ctl & FBC_CTL_EN) == 0) | ||
100 | return; | ||
101 | |||
102 | fbc_ctl &= ~FBC_CTL_EN; | ||
103 | I915_WRITE(FBC_CONTROL, fbc_ctl); | ||
104 | |||
105 | /* Wait for compressing bit to clear */ | ||
106 | if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { | ||
107 | DRM_DEBUG_KMS("FBC idle timed out\n"); | ||
108 | return; | ||
109 | } | ||
110 | |||
111 | DRM_DEBUG_KMS("disabled FBC\n"); | ||
112 | } | ||
113 | |||
114 | static void i8xx_enable_fbc(struct drm_crtc *crtc) | ||
115 | { | ||
116 | struct drm_device *dev = crtc->dev; | ||
117 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
118 | struct drm_framebuffer *fb = crtc->primary->fb; | ||
119 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | ||
120 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
121 | int cfb_pitch; | ||
122 | int i; | ||
123 | u32 fbc_ctl; | ||
124 | |||
125 | dev_priv->fbc.enabled = true; | ||
126 | |||
127 | cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE; | ||
128 | if (fb->pitches[0] < cfb_pitch) | ||
129 | cfb_pitch = fb->pitches[0]; | ||
130 | |||
131 | /* FBC_CTL wants 32B or 64B units */ | ||
132 | if (IS_GEN2(dev)) | ||
133 | cfb_pitch = (cfb_pitch / 32) - 1; | ||
134 | else | ||
135 | cfb_pitch = (cfb_pitch / 64) - 1; | ||
136 | |||
137 | /* Clear old tags */ | ||
138 | for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) | ||
139 | I915_WRITE(FBC_TAG + (i * 4), 0); | ||
140 | |||
141 | if (IS_GEN4(dev)) { | ||
142 | u32 fbc_ctl2; | ||
143 | |||
144 | /* Set it up... */ | ||
145 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; | ||
146 | fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane); | ||
147 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); | ||
148 | I915_WRITE(FBC_FENCE_OFF, crtc->y); | ||
149 | } | ||
150 | |||
151 | /* enable it... */ | ||
152 | fbc_ctl = I915_READ(FBC_CONTROL); | ||
153 | fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT; | ||
154 | fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC; | ||
155 | if (IS_I945GM(dev)) | ||
156 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ | ||
157 | fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; | ||
158 | fbc_ctl |= obj->fence_reg; | ||
159 | I915_WRITE(FBC_CONTROL, fbc_ctl); | ||
160 | |||
161 | DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n", | ||
162 | cfb_pitch, crtc->y, plane_name(intel_crtc->plane)); | ||
163 | } | ||
164 | |||
165 | static bool i8xx_fbc_enabled(struct drm_device *dev) | ||
166 | { | ||
167 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
168 | |||
169 | return I915_READ(FBC_CONTROL) & FBC_CTL_EN; | ||
170 | } | ||
171 | |||
172 | static void g4x_enable_fbc(struct drm_crtc *crtc) | ||
173 | { | ||
174 | struct drm_device *dev = crtc->dev; | ||
175 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
176 | struct drm_framebuffer *fb = crtc->primary->fb; | ||
177 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | ||
178 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
179 | u32 dpfc_ctl; | ||
180 | |||
181 | dev_priv->fbc.enabled = true; | ||
182 | |||
183 | dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN; | ||
184 | if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) | ||
185 | dpfc_ctl |= DPFC_CTL_LIMIT_2X; | ||
186 | else | ||
187 | dpfc_ctl |= DPFC_CTL_LIMIT_1X; | ||
188 | dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; | ||
189 | |||
190 | I915_WRITE(DPFC_FENCE_YOFF, crtc->y); | ||
191 | |||
192 | /* enable it... */ | ||
193 | I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); | ||
194 | |||
195 | DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); | ||
196 | } | ||
197 | |||
198 | static void g4x_disable_fbc(struct drm_device *dev) | ||
199 | { | ||
200 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
201 | u32 dpfc_ctl; | ||
202 | |||
203 | dev_priv->fbc.enabled = false; | ||
204 | |||
205 | /* Disable compression */ | ||
206 | dpfc_ctl = I915_READ(DPFC_CONTROL); | ||
207 | if (dpfc_ctl & DPFC_CTL_EN) { | ||
208 | dpfc_ctl &= ~DPFC_CTL_EN; | ||
209 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); | ||
210 | |||
211 | DRM_DEBUG_KMS("disabled FBC\n"); | ||
212 | } | ||
213 | } | ||
214 | |||
215 | static bool g4x_fbc_enabled(struct drm_device *dev) | ||
216 | { | ||
217 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
218 | |||
219 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; | ||
220 | } | ||
221 | |||
222 | static void sandybridge_blit_fbc_update(struct drm_device *dev) | ||
223 | { | ||
224 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
225 | u32 blt_ecoskpd; | ||
226 | |||
227 | /* Make sure blitter notifies FBC of writes */ | ||
228 | |||
229 | /* Blitter is part of Media powerwell on VLV. No impact of | ||
230 | * this param on other platforms for now */ | ||
231 | gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA); | ||
232 | |||
233 | blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); | ||
234 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << | ||
235 | GEN6_BLITTER_LOCK_SHIFT; | ||
236 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
237 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY; | ||
238 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
239 | blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY << | ||
240 | GEN6_BLITTER_LOCK_SHIFT); | ||
241 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | ||
242 | POSTING_READ(GEN6_BLITTER_ECOSKPD); | ||
243 | |||
244 | gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA); | ||
245 | } | ||
246 | |||
247 | static void ironlake_enable_fbc(struct drm_crtc *crtc) | ||
248 | { | ||
249 | struct drm_device *dev = crtc->dev; | ||
250 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
251 | struct drm_framebuffer *fb = crtc->primary->fb; | ||
252 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | ||
253 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
254 | u32 dpfc_ctl; | ||
255 | |||
256 | dev_priv->fbc.enabled = true; | ||
257 | |||
258 | dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane); | ||
259 | if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) | ||
260 | dev_priv->fbc.threshold++; | ||
261 | |||
262 | switch (dev_priv->fbc.threshold) { | ||
263 | case 4: | ||
264 | case 3: | ||
265 | dpfc_ctl |= DPFC_CTL_LIMIT_4X; | ||
266 | break; | ||
267 | case 2: | ||
268 | dpfc_ctl |= DPFC_CTL_LIMIT_2X; | ||
269 | break; | ||
270 | case 1: | ||
271 | dpfc_ctl |= DPFC_CTL_LIMIT_1X; | ||
272 | break; | ||
273 | } | ||
274 | dpfc_ctl |= DPFC_CTL_FENCE_EN; | ||
275 | if (IS_GEN5(dev)) | ||
276 | dpfc_ctl |= obj->fence_reg; | ||
277 | |||
278 | I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); | ||
279 | I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID); | ||
280 | /* enable it... */ | ||
281 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); | ||
282 | |||
283 | if (IS_GEN6(dev)) { | ||
284 | I915_WRITE(SNB_DPFC_CTL_SA, | ||
285 | SNB_CPU_FENCE_ENABLE | obj->fence_reg); | ||
286 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); | ||
287 | sandybridge_blit_fbc_update(dev); | ||
288 | } | ||
289 | |||
290 | DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); | ||
291 | } | ||
292 | |||
293 | static void ironlake_disable_fbc(struct drm_device *dev) | ||
294 | { | ||
295 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
296 | u32 dpfc_ctl; | ||
297 | |||
298 | dev_priv->fbc.enabled = false; | ||
299 | |||
300 | /* Disable compression */ | ||
301 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | ||
302 | if (dpfc_ctl & DPFC_CTL_EN) { | ||
303 | dpfc_ctl &= ~DPFC_CTL_EN; | ||
304 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); | ||
305 | |||
306 | DRM_DEBUG_KMS("disabled FBC\n"); | ||
307 | } | ||
308 | } | ||
309 | |||
310 | static bool ironlake_fbc_enabled(struct drm_device *dev) | ||
311 | { | ||
312 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
313 | |||
314 | return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; | ||
315 | } | ||
316 | |||
317 | static void gen7_enable_fbc(struct drm_crtc *crtc) | ||
318 | { | ||
319 | struct drm_device *dev = crtc->dev; | ||
320 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
321 | struct drm_framebuffer *fb = crtc->primary->fb; | ||
322 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | ||
323 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
324 | u32 dpfc_ctl; | ||
325 | |||
326 | dev_priv->fbc.enabled = true; | ||
327 | |||
328 | dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane); | ||
329 | if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) | ||
330 | dev_priv->fbc.threshold++; | ||
331 | |||
332 | switch (dev_priv->fbc.threshold) { | ||
333 | case 4: | ||
334 | case 3: | ||
335 | dpfc_ctl |= DPFC_CTL_LIMIT_4X; | ||
336 | break; | ||
337 | case 2: | ||
338 | dpfc_ctl |= DPFC_CTL_LIMIT_2X; | ||
339 | break; | ||
340 | case 1: | ||
341 | dpfc_ctl |= DPFC_CTL_LIMIT_1X; | ||
342 | break; | ||
343 | } | ||
344 | |||
345 | dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; | ||
346 | |||
347 | if (dev_priv->fbc.false_color) | ||
348 | dpfc_ctl |= FBC_CTL_FALSE_COLOR; | ||
349 | |||
350 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); | ||
351 | |||
352 | if (IS_IVYBRIDGE(dev)) { | ||
353 | /* WaFbcAsynchFlipDisableFbcQueue:ivb */ | ||
354 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | ||
355 | I915_READ(ILK_DISPLAY_CHICKEN1) | | ||
356 | ILK_FBCQ_DIS); | ||
357 | } else { | ||
358 | /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ | ||
359 | I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe), | ||
360 | I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) | | ||
361 | HSW_FBCQ_DIS); | ||
362 | } | ||
363 | |||
364 | I915_WRITE(SNB_DPFC_CTL_SA, | ||
365 | SNB_CPU_FENCE_ENABLE | obj->fence_reg); | ||
366 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); | ||
367 | |||
368 | sandybridge_blit_fbc_update(dev); | ||
369 | |||
370 | DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); | ||
371 | } | ||
372 | |||
373 | bool intel_fbc_enabled(struct drm_device *dev) | ||
374 | { | ||
375 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
376 | |||
377 | return dev_priv->fbc.enabled; | ||
378 | } | ||
379 | |||
380 | void bdw_fbc_sw_flush(struct drm_device *dev, u32 value) | ||
381 | { | ||
382 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
383 | |||
384 | if (!IS_GEN8(dev)) | ||
385 | return; | ||
386 | |||
387 | if (!intel_fbc_enabled(dev)) | ||
388 | return; | ||
389 | |||
390 | I915_WRITE(MSG_FBC_REND_STATE, value); | ||
391 | } | ||
392 | |||
393 | static void intel_fbc_work_fn(struct work_struct *__work) | ||
394 | { | ||
395 | struct intel_fbc_work *work = | ||
396 | container_of(to_delayed_work(__work), | ||
397 | struct intel_fbc_work, work); | ||
398 | struct drm_device *dev = work->crtc->dev; | ||
399 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
400 | |||
401 | mutex_lock(&dev->struct_mutex); | ||
402 | if (work == dev_priv->fbc.fbc_work) { | ||
403 | /* Double check that we haven't switched fb without cancelling | ||
404 | * the prior work. | ||
405 | */ | ||
406 | if (work->crtc->primary->fb == work->fb) { | ||
407 | dev_priv->display.enable_fbc(work->crtc); | ||
408 | |||
409 | dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane; | ||
410 | dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id; | ||
411 | dev_priv->fbc.y = work->crtc->y; | ||
412 | } | ||
413 | |||
414 | dev_priv->fbc.fbc_work = NULL; | ||
415 | } | ||
416 | mutex_unlock(&dev->struct_mutex); | ||
417 | |||
418 | kfree(work); | ||
419 | } | ||
420 | |||
421 | static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv) | ||
422 | { | ||
423 | if (dev_priv->fbc.fbc_work == NULL) | ||
424 | return; | ||
425 | |||
426 | DRM_DEBUG_KMS("cancelling pending FBC enable\n"); | ||
427 | |||
428 | /* Synchronisation is provided by struct_mutex and checking of | ||
429 | * dev_priv->fbc.fbc_work, so we can perform the cancellation | ||
430 | * entirely asynchronously. | ||
431 | */ | ||
432 | if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work)) | ||
433 | /* tasklet was killed before being run, clean up */ | ||
434 | kfree(dev_priv->fbc.fbc_work); | ||
435 | |||
436 | /* Mark the work as no longer wanted so that if it does | ||
437 | * wake-up (because the work was already running and waiting | ||
438 | * for our mutex), it will discover that it is no longer | ||
439 | * necessary to run. | ||
440 | */ | ||
441 | dev_priv->fbc.fbc_work = NULL; | ||
442 | } | ||
443 | |||
444 | static void intel_enable_fbc(struct drm_crtc *crtc) | ||
445 | { | ||
446 | struct intel_fbc_work *work; | ||
447 | struct drm_device *dev = crtc->dev; | ||
448 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
449 | |||
450 | if (!dev_priv->display.enable_fbc) | ||
451 | return; | ||
452 | |||
453 | intel_cancel_fbc_work(dev_priv); | ||
454 | |||
455 | work = kzalloc(sizeof(*work), GFP_KERNEL); | ||
456 | if (work == NULL) { | ||
457 | DRM_ERROR("Failed to allocate FBC work structure\n"); | ||
458 | dev_priv->display.enable_fbc(crtc); | ||
459 | return; | ||
460 | } | ||
461 | |||
462 | work->crtc = crtc; | ||
463 | work->fb = crtc->primary->fb; | ||
464 | INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn); | ||
465 | |||
466 | dev_priv->fbc.fbc_work = work; | ||
467 | |||
468 | /* Delay the actual enabling to let pageflipping cease and the | ||
469 | * display to settle before starting the compression. Note that | ||
470 | * this delay also serves a second purpose: it allows for a | ||
471 | * vblank to pass after disabling the FBC before we attempt | ||
472 | * to modify the control registers. | ||
473 | * | ||
474 | * A more complicated solution would involve tracking vblanks | ||
475 | * following the termination of the page-flipping sequence | ||
476 | * and indeed performing the enable as a co-routine and not | ||
477 | * waiting synchronously upon the vblank. | ||
478 | * | ||
479 | * WaFbcWaitForVBlankBeforeEnable:ilk,snb | ||
480 | */ | ||
481 | schedule_delayed_work(&work->work, msecs_to_jiffies(50)); | ||
482 | } | ||
483 | |||
484 | void intel_disable_fbc(struct drm_device *dev) | ||
485 | { | ||
486 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
487 | |||
488 | intel_cancel_fbc_work(dev_priv); | ||
489 | |||
490 | if (!dev_priv->display.disable_fbc) | ||
491 | return; | ||
492 | |||
493 | dev_priv->display.disable_fbc(dev); | ||
494 | dev_priv->fbc.plane = -1; | ||
495 | } | ||
496 | |||
497 | static bool set_no_fbc_reason(struct drm_i915_private *dev_priv, | ||
498 | enum no_fbc_reason reason) | ||
499 | { | ||
500 | if (dev_priv->fbc.no_fbc_reason == reason) | ||
501 | return false; | ||
502 | |||
503 | dev_priv->fbc.no_fbc_reason = reason; | ||
504 | return true; | ||
505 | } | ||
506 | |||
507 | /** | ||
508 | * intel_update_fbc - enable/disable FBC as needed | ||
509 | * @dev: the drm_device | ||
510 | * | ||
511 | * Set up the framebuffer compression hardware at mode set time. We | ||
512 | * enable it if possible: | ||
513 | * - plane A only (on pre-965) | ||
514 | * - no pixel multiply/line duplication | ||
515 | * - no alpha buffer discard | ||
516 | * - no dual wide | ||
517 | * - framebuffer <= max_hdisplay in width, max_vdisplay in height | ||
518 | * | ||
519 | * We can't assume that any compression will take place (worst case), | ||
520 | * so the compressed buffer has to be the same size as the uncompressed | ||
521 | * one. It also must reside (along with the line length buffer) in | ||
522 | * stolen memory. | ||
523 | * | ||
524 | * We need to enable/disable FBC on a global basis. | ||
525 | */ | ||
526 | void intel_update_fbc(struct drm_device *dev) | ||
527 | { | ||
528 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
529 | struct drm_crtc *crtc = NULL, *tmp_crtc; | ||
530 | struct intel_crtc *intel_crtc; | ||
531 | struct drm_framebuffer *fb; | ||
532 | struct drm_i915_gem_object *obj; | ||
533 | const struct drm_display_mode *adjusted_mode; | ||
534 | unsigned int max_width, max_height; | ||
535 | |||
536 | if (!HAS_FBC(dev)) { | ||
537 | set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED); | ||
538 | return; | ||
539 | } | ||
540 | |||
541 | if (!i915.powersave) { | ||
542 | if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) | ||
543 | DRM_DEBUG_KMS("fbc disabled per module param\n"); | ||
544 | return; | ||
545 | } | ||
546 | |||
547 | /* | ||
548 | * If FBC is already on, we just have to verify that we can | ||
549 | * keep it that way... | ||
550 | * Need to disable if: | ||
551 | * - more than one pipe is active | ||
552 | * - changing FBC params (stride, fence, mode) | ||
553 | * - new fb is too large to fit in compressed buffer | ||
554 | * - going to an unsupported config (interlace, pixel multiply, etc.) | ||
555 | */ | ||
556 | for_each_crtc(dev, tmp_crtc) { | ||
557 | if (intel_crtc_active(tmp_crtc) && | ||
558 | to_intel_crtc(tmp_crtc)->primary_enabled) { | ||
559 | if (crtc) { | ||
560 | if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES)) | ||
561 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); | ||
562 | goto out_disable; | ||
563 | } | ||
564 | crtc = tmp_crtc; | ||
565 | } | ||
566 | } | ||
567 | |||
568 | if (!crtc || crtc->primary->fb == NULL) { | ||
569 | if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT)) | ||
570 | DRM_DEBUG_KMS("no output, disabling\n"); | ||
571 | goto out_disable; | ||
572 | } | ||
573 | |||
574 | intel_crtc = to_intel_crtc(crtc); | ||
575 | fb = crtc->primary->fb; | ||
576 | obj = intel_fb_obj(fb); | ||
577 | adjusted_mode = &intel_crtc->config.adjusted_mode; | ||
578 | |||
579 | if (i915.enable_fbc < 0) { | ||
580 | if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT)) | ||
581 | DRM_DEBUG_KMS("disabled per chip default\n"); | ||
582 | goto out_disable; | ||
583 | } | ||
584 | if (!i915.enable_fbc) { | ||
585 | if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) | ||
586 | DRM_DEBUG_KMS("fbc disabled per module param\n"); | ||
587 | goto out_disable; | ||
588 | } | ||
589 | if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) || | ||
590 | (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) { | ||
591 | if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE)) | ||
592 | DRM_DEBUG_KMS("mode incompatible with compression, " | ||
593 | "disabling\n"); | ||
594 | goto out_disable; | ||
595 | } | ||
596 | |||
597 | if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) { | ||
598 | max_width = 4096; | ||
599 | max_height = 4096; | ||
600 | } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { | ||
601 | max_width = 4096; | ||
602 | max_height = 2048; | ||
603 | } else { | ||
604 | max_width = 2048; | ||
605 | max_height = 1536; | ||
606 | } | ||
607 | if (intel_crtc->config.pipe_src_w > max_width || | ||
608 | intel_crtc->config.pipe_src_h > max_height) { | ||
609 | if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE)) | ||
610 | DRM_DEBUG_KMS("mode too large for compression, disabling\n"); | ||
611 | goto out_disable; | ||
612 | } | ||
613 | if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) && | ||
614 | intel_crtc->plane != PLANE_A) { | ||
615 | if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE)) | ||
616 | DRM_DEBUG_KMS("plane not A, disabling compression\n"); | ||
617 | goto out_disable; | ||
618 | } | ||
619 | |||
620 | /* The use of a CPU fence is mandatory in order to detect writes | ||
621 | * by the CPU to the scanout and trigger updates to the FBC. | ||
622 | */ | ||
623 | if (obj->tiling_mode != I915_TILING_X || | ||
624 | obj->fence_reg == I915_FENCE_REG_NONE) { | ||
625 | if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED)) | ||
626 | DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); | ||
627 | goto out_disable; | ||
628 | } | ||
629 | if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) && | ||
630 | to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) { | ||
631 | if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE)) | ||
632 | DRM_DEBUG_KMS("Rotation unsupported, disabling\n"); | ||
633 | goto out_disable; | ||
634 | } | ||
635 | |||
636 | /* If the kernel debugger is active, always disable compression */ | ||
637 | if (in_dbg_master()) | ||
638 | goto out_disable; | ||
639 | |||
640 | if (i915_gem_stolen_setup_compression(dev, obj->base.size, | ||
641 | drm_format_plane_cpp(fb->pixel_format, 0))) { | ||
642 | if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL)) | ||
643 | DRM_DEBUG_KMS("framebuffer too large, disabling compression\n"); | ||
644 | goto out_disable; | ||
645 | } | ||
646 | |||
647 | /* If the scanout has not changed, don't modify the FBC settings. | ||
648 | * Note that we make the fundamental assumption that the fb->obj | ||
649 | * cannot be unpinned (and have its GTT offset and fence revoked) | ||
650 | * without first being decoupled from the scanout and FBC disabled. | ||
651 | */ | ||
652 | if (dev_priv->fbc.plane == intel_crtc->plane && | ||
653 | dev_priv->fbc.fb_id == fb->base.id && | ||
654 | dev_priv->fbc.y == crtc->y) | ||
655 | return; | ||
656 | |||
657 | if (intel_fbc_enabled(dev)) { | ||
658 | /* We update FBC along two paths, after changing fb/crtc | ||
659 | * configuration (modeswitching) and after page-flipping | ||
660 | * finishes. For the latter, we know that not only did | ||
661 | * we disable the FBC at the start of the page-flip | ||
662 | * sequence, but also more than one vblank has passed. | ||
663 | * | ||
664 | * For the former case of modeswitching, it is possible | ||
665 | * to switch between two FBC valid configurations | ||
666 | * instantaneously so we do need to disable the FBC | ||
667 | * before we can modify its control registers. We also | ||
668 | * have to wait for the next vblank for that to take | ||
669 | * effect. However, since we delay enabling FBC we can | ||
670 | * assume that a vblank has passed since disabling and | ||
671 | * that we can safely alter the registers in the deferred | ||
672 | * callback. | ||
673 | * | ||
674 | * In the scenario that we go from a valid to invalid | ||
675 | * and then back to valid FBC configuration we have | ||
676 | * no strict enforcement that a vblank occurred since | ||
677 | * disabling the FBC. However, along all current pipe | ||
678 | * disabling paths we do need to wait for a vblank at | ||
679 | * some point. And we wait before enabling FBC anyway. | ||
680 | */ | ||
681 | DRM_DEBUG_KMS("disabling active FBC for update\n"); | ||
682 | intel_disable_fbc(dev); | ||
683 | } | ||
684 | |||
685 | intel_enable_fbc(crtc); | ||
686 | dev_priv->fbc.no_fbc_reason = FBC_OK; | ||
687 | return; | ||
688 | |||
689 | out_disable: | ||
690 | /* Multiple disables should be harmless */ | ||
691 | if (intel_fbc_enabled(dev)) { | ||
692 | DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); | ||
693 | intel_disable_fbc(dev); | ||
694 | } | ||
695 | i915_gem_stolen_cleanup_compression(dev); | ||
696 | } | ||
697 | 79 | ||
698 | static void i915_pineview_get_mem_freq(struct drm_device *dev) | 80 | static void i915_pineview_get_mem_freq(struct drm_device *dev) |
699 | { | 81 | { |
@@ -3286,7 +2668,8 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc, | |||
3286 | list_for_each_entry(plane, &dev->mode_config.plane_list, head) { | 2668 | list_for_each_entry(plane, &dev->mode_config.plane_list, head) { |
3287 | struct intel_plane *intel_plane = to_intel_plane(plane); | 2669 | struct intel_plane *intel_plane = to_intel_plane(plane); |
3288 | 2670 | ||
3289 | if (intel_plane->pipe == pipe) | 2671 | if (intel_plane->pipe == pipe && |
2672 | plane->type == DRM_PLANE_TYPE_OVERLAY) | ||
3290 | p->plane[i++] = intel_plane->wm; | 2673 | p->plane[i++] = intel_plane->wm; |
3291 | } | 2674 | } |
3292 | } | 2675 | } |
@@ -3621,9 +3004,8 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv, | |||
3621 | skl_ddb_entry_size(&cur_ddb->pipe[pipe])) { | 3004 | skl_ddb_entry_size(&cur_ddb->pipe[pipe])) { |
3622 | skl_wm_flush_pipe(dev_priv, pipe, 2); | 3005 | skl_wm_flush_pipe(dev_priv, pipe, 2); |
3623 | intel_wait_for_vblank(dev, pipe); | 3006 | intel_wait_for_vblank(dev, pipe); |
3007 | reallocated[pipe] = true; | ||
3624 | } | 3008 | } |
3625 | |||
3626 | reallocated[pipe] = true; | ||
3627 | } | 3009 | } |
3628 | 3010 | ||
3629 | /* | 3011 | /* |
@@ -5307,7 +4689,8 @@ static void cherryview_enable_rps(struct drm_device *dev) | |||
5307 | I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); | 4689 | I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); |
5308 | I915_WRITE(GEN6_RC_SLEEP, 0); | 4690 | I915_WRITE(GEN6_RC_SLEEP, 0); |
5309 | 4691 | ||
5310 | I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ | 4692 | /* TO threshold set to 1750 us ( 0x557 * 1.28 us) */ |
4693 | I915_WRITE(GEN6_RC6_THRESHOLD, 0x557); | ||
5311 | 4694 | ||
5312 | /* allows RC6 residency counter to work */ | 4695 | /* allows RC6 residency counter to work */ |
5313 | I915_WRITE(VLV_COUNTER_CONTROL, | 4696 | I915_WRITE(VLV_COUNTER_CONTROL, |
@@ -5321,7 +4704,7 @@ static void cherryview_enable_rps(struct drm_device *dev) | |||
5321 | /* 3: Enable RC6 */ | 4704 | /* 3: Enable RC6 */ |
5322 | if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) && | 4705 | if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) && |
5323 | (pcbr >> VLV_PCBR_ADDR_SHIFT)) | 4706 | (pcbr >> VLV_PCBR_ADDR_SHIFT)) |
5324 | rc6_mode = GEN6_RC_CTL_EI_MODE(1); | 4707 | rc6_mode = GEN7_RC_CTL_TO_MODE; |
5325 | 4708 | ||
5326 | I915_WRITE(GEN6_RC_CONTROL, rc6_mode); | 4709 | I915_WRITE(GEN6_RC_CONTROL, rc6_mode); |
5327 | 4710 | ||
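The new comment's arithmetic can be verified directly: 0x557 is 1367 decimal, and at the 1.28 us counter granularity the comment assumes, the threshold works out as stated.

/* Worked check of the replacement comment:
 *   0x557 = 1367 (decimal)
 *   1367 * 1.28 us = 1749.76 us, i.e. ~1750 us
 */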
@@ -5681,146 +5064,27 @@ unsigned long i915_mch_val(struct drm_i915_private *dev_priv) | |||
5681 | return ((m * x) / 127) - b; | 5064 | return ((m * x) / 127) - b; |
5682 | } | 5065 | } |
5683 | 5066 | ||
5684 | static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) | 5067 | static int _pxvid_to_vd(u8 pxvid) |
5068 | { | ||
5069 | if (pxvid == 0) | ||
5070 | return 0; | ||
5071 | |||
5072 | if (pxvid >= 8 && pxvid < 31) | ||
5073 | pxvid = 31; | ||
5074 | |||
5075 | return (pxvid + 2) * 125; | ||
5076 | } | ||
5077 | |||
5078 | static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) | ||
5685 | { | 5079 | { |
5686 | struct drm_device *dev = dev_priv->dev; | 5080 | struct drm_device *dev = dev_priv->dev; |
5687 | static const struct v_table { | 5081 | const int vd = _pxvid_to_vd(pxvid); |
5688 | u16 vd; /* in .1 mil */ | 5082 | const int vm = vd - 1125; |
5689 | u16 vm; /* in .1 mil */ | 5083 | |
5690 | } v_table[] = { | ||
5691 | { 0, 0, }, | ||
5692 | { 375, 0, }, | ||
5693 | { 500, 0, }, | ||
5694 | { 625, 0, }, | ||
5695 | { 750, 0, }, | ||
5696 | { 875, 0, }, | ||
5697 | { 1000, 0, }, | ||
5698 | { 1125, 0, }, | ||
5699 | { 4125, 3000, }, | ||
5700 | { 4125, 3000, }, | ||
5701 | { 4125, 3000, }, | ||
5702 | { 4125, 3000, }, | ||
5703 | { 4125, 3000, }, | ||
5704 | { 4125, 3000, }, | ||
5705 | { 4125, 3000, }, | ||
5706 | { 4125, 3000, }, | ||
5707 | { 4125, 3000, }, | ||
5708 | { 4125, 3000, }, | ||
5709 | { 4125, 3000, }, | ||
5710 | { 4125, 3000, }, | ||
5711 | { 4125, 3000, }, | ||
5712 | { 4125, 3000, }, | ||
5713 | { 4125, 3000, }, | ||
5714 | { 4125, 3000, }, | ||
5715 | { 4125, 3000, }, | ||
5716 | { 4125, 3000, }, | ||
5717 | { 4125, 3000, }, | ||
5718 | { 4125, 3000, }, | ||
5719 | { 4125, 3000, }, | ||
5720 | { 4125, 3000, }, | ||
5721 | { 4125, 3000, }, | ||
5722 | { 4125, 3000, }, | ||
5723 | { 4250, 3125, }, | ||
5724 | { 4375, 3250, }, | ||
5725 | { 4500, 3375, }, | ||
5726 | { 4625, 3500, }, | ||
5727 | { 4750, 3625, }, | ||
5728 | { 4875, 3750, }, | ||
5729 | { 5000, 3875, }, | ||
5730 | { 5125, 4000, }, | ||
5731 | { 5250, 4125, }, | ||
5732 | { 5375, 4250, }, | ||
5733 | { 5500, 4375, }, | ||
5734 | { 5625, 4500, }, | ||
5735 | { 5750, 4625, }, | ||
5736 | { 5875, 4750, }, | ||
5737 | { 6000, 4875, }, | ||
5738 | { 6125, 5000, }, | ||
5739 | { 6250, 5125, }, | ||
5740 | { 6375, 5250, }, | ||
5741 | { 6500, 5375, }, | ||
5742 | { 6625, 5500, }, | ||
5743 | { 6750, 5625, }, | ||
5744 | { 6875, 5750, }, | ||
5745 | { 7000, 5875, }, | ||
5746 | { 7125, 6000, }, | ||
5747 | { 7250, 6125, }, | ||
5748 | { 7375, 6250, }, | ||
5749 | { 7500, 6375, }, | ||
5750 | { 7625, 6500, }, | ||
5751 | { 7750, 6625, }, | ||
5752 | { 7875, 6750, }, | ||
5753 | { 8000, 6875, }, | ||
5754 | { 8125, 7000, }, | ||
5755 | { 8250, 7125, }, | ||
5756 | { 8375, 7250, }, | ||
5757 | { 8500, 7375, }, | ||
5758 | { 8625, 7500, }, | ||
5759 | { 8750, 7625, }, | ||
5760 | { 8875, 7750, }, | ||
5761 | { 9000, 7875, }, | ||
5762 | { 9125, 8000, }, | ||
5763 | { 9250, 8125, }, | ||
5764 | { 9375, 8250, }, | ||
5765 | { 9500, 8375, }, | ||
5766 | { 9625, 8500, }, | ||
5767 | { 9750, 8625, }, | ||
5768 | { 9875, 8750, }, | ||
5769 | { 10000, 8875, }, | ||
5770 | { 10125, 9000, }, | ||
5771 | { 10250, 9125, }, | ||
5772 | { 10375, 9250, }, | ||
5773 | { 10500, 9375, }, | ||
5774 | { 10625, 9500, }, | ||
5775 | { 10750, 9625, }, | ||
5776 | { 10875, 9750, }, | ||
5777 | { 11000, 9875, }, | ||
5778 | { 11125, 10000, }, | ||
5779 | { 11250, 10125, }, | ||
5780 | { 11375, 10250, }, | ||
5781 | { 11500, 10375, }, | ||
5782 | { 11625, 10500, }, | ||
5783 | { 11750, 10625, }, | ||
5784 | { 11875, 10750, }, | ||
5785 | { 12000, 10875, }, | ||
5786 | { 12125, 11000, }, | ||
5787 | { 12250, 11125, }, | ||
5788 | { 12375, 11250, }, | ||
5789 | { 12500, 11375, }, | ||
5790 | { 12625, 11500, }, | ||
5791 | { 12750, 11625, }, | ||
5792 | { 12875, 11750, }, | ||
5793 | { 13000, 11875, }, | ||
5794 | { 13125, 12000, }, | ||
5795 | { 13250, 12125, }, | ||
5796 | { 13375, 12250, }, | ||
5797 | { 13500, 12375, }, | ||
5798 | { 13625, 12500, }, | ||
5799 | { 13750, 12625, }, | ||
5800 | { 13875, 12750, }, | ||
5801 | { 14000, 12875, }, | ||
5802 | { 14125, 13000, }, | ||
5803 | { 14250, 13125, }, | ||
5804 | { 14375, 13250, }, | ||
5805 | { 14500, 13375, }, | ||
5806 | { 14625, 13500, }, | ||
5807 | { 14750, 13625, }, | ||
5808 | { 14875, 13750, }, | ||
5809 | { 15000, 13875, }, | ||
5810 | { 15125, 14000, }, | ||
5811 | { 15250, 14125, }, | ||
5812 | { 15375, 14250, }, | ||
5813 | { 15500, 14375, }, | ||
5814 | { 15625, 14500, }, | ||
5815 | { 15750, 14625, }, | ||
5816 | { 15875, 14750, }, | ||
5817 | { 16000, 14875, }, | ||
5818 | { 16125, 15000, }, | ||
5819 | }; | ||
5820 | if (INTEL_INFO(dev)->is_mobile) | 5084 | if (INTEL_INFO(dev)->is_mobile) |
5821 | return v_table[pxvid].vm; | 5085 | return vm > 0 ? vm : 0; |
5822 | else | 5086 | |
5823 | return v_table[pxvid].vd; | 5087 | return vd; |
5824 | } | 5088 | } |
5825 | 5089 | ||
5826 | static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) | 5090 | static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) |
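The closed form can be checked against the deleted v_table row by row: rows 1-7 ascend in 125 steps from 375, rows 8-31 are pinned at {4125, 3000}, and from row 32 onward both columns resume the 125 step with vm trailing vd by exactly 1125. Spot checks:

/* _pxvid_to_vd() versus the deleted v_table:
 *   pxvid = 0   -> 0 by the special case (the formula alone gives 250)
 *   pxvid = 1   -> (1+2)*125 = 375                        (row 1)
 *   pxvid = 20  -> clamped to 31 -> (31+2)*125 = 4125,
 *                  vm = 4125 - 1125 = 3000                (rows 8..31)
 *   pxvid = 32  -> (32+2)*125 = 4250, vm = 3125           (row 32)
 *   pxvid = 127 -> (127+2)*125 = 16125, vm = 15000        (last row)
 * The "vm > 0 ? vm : 0" clamp in pvid_to_extvid() reproduces the
 * zero vm column for rows 0..7.
 */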
@@ -7051,43 +6315,12 @@ void intel_suspend_hw(struct drm_device *dev) | |||
7051 | lpt_suspend_hw(dev); | 6315 | lpt_suspend_hw(dev); |
7052 | } | 6316 | } |
7053 | 6317 | ||
7054 | static void intel_init_fbc(struct drm_i915_private *dev_priv) | ||
7055 | { | ||
7056 | if (!HAS_FBC(dev_priv)) { | ||
7057 | dev_priv->fbc.enabled = false; | ||
7058 | return; | ||
7059 | } | ||
7060 | |||
7061 | if (INTEL_INFO(dev_priv)->gen >= 7) { | ||
7062 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; | ||
7063 | dev_priv->display.enable_fbc = gen7_enable_fbc; | ||
7064 | dev_priv->display.disable_fbc = ironlake_disable_fbc; | ||
7065 | } else if (INTEL_INFO(dev_priv)->gen >= 5) { | ||
7066 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; | ||
7067 | dev_priv->display.enable_fbc = ironlake_enable_fbc; | ||
7068 | dev_priv->display.disable_fbc = ironlake_disable_fbc; | ||
7069 | } else if (IS_GM45(dev_priv)) { | ||
7070 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; | ||
7071 | dev_priv->display.enable_fbc = g4x_enable_fbc; | ||
7072 | dev_priv->display.disable_fbc = g4x_disable_fbc; | ||
7073 | } else { | ||
7074 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; | ||
7075 | dev_priv->display.enable_fbc = i8xx_enable_fbc; | ||
7076 | dev_priv->display.disable_fbc = i8xx_disable_fbc; | ||
7077 | |||
7078 | /* This value was pulled out of someone's hat */ | ||
7079 | I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT); | ||
7080 | } | ||
7081 | |||
7082 | dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev); | ||
7083 | } | ||
7084 | |||
7085 | /* Set up chip specific power management-related functions */ | 6318 | /* Set up chip specific power management-related functions */ |
7086 | void intel_init_pm(struct drm_device *dev) | 6319 | void intel_init_pm(struct drm_device *dev) |
7087 | { | 6320 | { |
7088 | struct drm_i915_private *dev_priv = dev->dev_private; | 6321 | struct drm_i915_private *dev_priv = dev->dev_private; |
7089 | 6322 | ||
7090 | intel_init_fbc(dev_priv); | 6323 | intel_fbc_init(dev_priv); |
7091 | 6324 | ||
7092 | /* For cxsr */ | 6325 | /* For cxsr */ |
7093 | if (IS_PINEVIEW(dev)) | 6326 | if (IS_PINEVIEW(dev)) |
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 716b8a961eea..dd0e6e0447d4 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c | |||
@@ -61,14 +61,15 @@ static bool is_edp_psr(struct intel_dp *intel_dp) | |||
61 | return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED; | 61 | return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED; |
62 | } | 62 | } |
63 | 63 | ||
64 | bool intel_psr_is_enabled(struct drm_device *dev) | 64 | static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe) |
65 | { | 65 | { |
66 | struct drm_i915_private *dev_priv = dev->dev_private; | 66 | struct drm_i915_private *dev_priv = dev->dev_private; |
67 | uint32_t val; | ||
67 | 68 | ||
68 | if (!HAS_PSR(dev)) | 69 | val = I915_READ(VLV_PSRSTAT(pipe)) & |
69 | return false; | 70 | VLV_EDP_PSR_CURR_STATE_MASK; |
70 | 71 | return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) || | |
71 | return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; | 72 | (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE); |
72 | } | 73 | } |
73 | 74 | ||
74 | static void intel_psr_write_vsc(struct intel_dp *intel_dp, | 75 | static void intel_psr_write_vsc(struct intel_dp *intel_dp, |
@@ -100,7 +101,23 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp, | |||
100 | POSTING_READ(ctl_reg); | 101 | POSTING_READ(ctl_reg); |
101 | } | 102 | } |
102 | 103 | ||
103 | static void intel_psr_setup_vsc(struct intel_dp *intel_dp) | 104 | static void vlv_psr_setup_vsc(struct intel_dp *intel_dp) |
105 | { | ||
106 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | ||
107 | struct drm_device *dev = intel_dig_port->base.base.dev; | ||
108 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
109 | struct drm_crtc *crtc = intel_dig_port->base.base.crtc; | ||
110 | enum pipe pipe = to_intel_crtc(crtc)->pipe; | ||
111 | uint32_t val; | ||
112 | |||
113 | /* VLV auto-generates the VSC packet as per the eDP 1.3 spec, Table 3.10 */ | ||
114 | val = I915_READ(VLV_VSCSDP(pipe)); | ||
115 | val &= ~VLV_EDP_PSR_SDP_FREQ_MASK; | ||
116 | val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME; | ||
117 | I915_WRITE(VLV_VSCSDP(pipe), val); | ||
118 | } | ||
119 | |||
120 | static void hsw_psr_setup_vsc(struct intel_dp *intel_dp) | ||
104 | { | 121 | { |
105 | struct edp_vsc_psr psr_vsc; | 122 | struct edp_vsc_psr psr_vsc; |
106 | 123 | ||
@@ -113,14 +130,20 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp) | |||
113 | intel_psr_write_vsc(intel_dp, &psr_vsc); | 130 | intel_psr_write_vsc(intel_dp, &psr_vsc); |
114 | } | 131 | } |
115 | 132 | ||
116 | static void intel_psr_enable_sink(struct intel_dp *intel_dp) | 133 | static void vlv_psr_enable_sink(struct intel_dp *intel_dp) |
134 | { | ||
135 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, | ||
136 | DP_PSR_ENABLE); | ||
137 | } | ||
138 | |||
139 | static void hsw_psr_enable_sink(struct intel_dp *intel_dp) | ||
117 | { | 140 | { |
118 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | 141 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
119 | struct drm_device *dev = dig_port->base.base.dev; | 142 | struct drm_device *dev = dig_port->base.base.dev; |
120 | struct drm_i915_private *dev_priv = dev->dev_private; | 143 | struct drm_i915_private *dev_priv = dev->dev_private; |
121 | uint32_t aux_clock_divider; | 144 | uint32_t aux_clock_divider; |
122 | int precharge = 0x3; | 145 | int precharge = 0x3; |
123 | bool only_standby = false; | 146 | bool only_standby = dev_priv->vbt.psr.full_link; |
124 | static const uint8_t aux_msg[] = { | 147 | static const uint8_t aux_msg[] = { |
125 | [0] = DP_AUX_NATIVE_WRITE << 4, | 148 | [0] = DP_AUX_NATIVE_WRITE << 4, |
126 | [1] = DP_SET_POWER >> 8, | 149 | [1] = DP_SET_POWER >> 8, |
@@ -157,13 +180,50 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp) | |||
157 | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT)); | 180 | (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT)); |
158 | } | 181 | } |
159 | 182 | ||
160 | static void intel_psr_enable_source(struct intel_dp *intel_dp) | 183 | static void vlv_psr_enable_source(struct intel_dp *intel_dp) |
184 | { | ||
185 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | ||
186 | struct drm_device *dev = dig_port->base.base.dev; | ||
187 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
188 | struct drm_crtc *crtc = dig_port->base.base.crtc; | ||
189 | enum pipe pipe = to_intel_crtc(crtc)->pipe; | ||
190 | |||
191 | /* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */ | ||
192 | I915_WRITE(VLV_PSRCTL(pipe), | ||
193 | VLV_EDP_PSR_MODE_SW_TIMER | | ||
194 | VLV_EDP_PSR_SRC_TRANSMITTER_STATE | | ||
195 | VLV_EDP_PSR_ENABLE); | ||
196 | } | ||
197 | |||
198 | static void vlv_psr_activate(struct intel_dp *intel_dp) | ||
199 | { | ||
200 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | ||
201 | struct drm_device *dev = dig_port->base.base.dev; | ||
202 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
203 | struct drm_crtc *crtc = dig_port->base.base.crtc; | ||
204 | enum pipe pipe = to_intel_crtc(crtc)->pipe; | ||
205 | |||
206 | /* Let's do the transition from PSR_state 1 to PSR_state 2, | ||
207 | * that is, the PSR transition to active - static frame transmission. | ||
208 | * Then the hardware is responsible for the transition to PSR_state 3, | ||
209 | * that is, PSR active - no Remote Frame Buffer (RFB) update. | ||
210 | */ | ||
211 | I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) | | ||
212 | VLV_EDP_PSR_ACTIVE_ENTRY); | ||
213 | } | ||
214 | |||
215 | static void hsw_psr_enable_source(struct intel_dp *intel_dp) | ||
161 | { | 216 | { |
162 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | 217 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
163 | struct drm_device *dev = dig_port->base.base.dev; | 218 | struct drm_device *dev = dig_port->base.base.dev; |
164 | struct drm_i915_private *dev_priv = dev->dev_private; | 219 | struct drm_i915_private *dev_priv = dev->dev_private; |
165 | uint32_t max_sleep_time = 0x1f; | 220 | uint32_t max_sleep_time = 0x1f; |
166 | uint32_t idle_frames = 1; | 221 | /* It was recently identified that, depending on the panel, the idle |
222 | * frame count calculated by the HW can be off by 1. So let's use the | ||
223 | * value from the VBT + 1, and at minimum 2, to be on the safe side. | ||
224 | */ | ||
225 | uint32_t idle_frames = dev_priv->vbt.psr.idle_frames ? | ||
226 | dev_priv->vbt.psr.idle_frames + 1 : 2; | ||
167 | uint32_t val = 0x0; | 227 | uint32_t val = 0x0; |
168 | const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; | 228 | const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; |
169 | bool only_standby = false; | 229 | bool only_standby = false; |
@@ -176,7 +236,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp) | |||
176 | val |= EDP_PSR_TP2_TP3_TIME_0us; | 236 | val |= EDP_PSR_TP2_TP3_TIME_0us; |
177 | val |= EDP_PSR_TP1_TIME_0us; | 237 | val |= EDP_PSR_TP1_TIME_0us; |
178 | val |= EDP_PSR_SKIP_AUX_EXIT; | 238 | val |= EDP_PSR_SKIP_AUX_EXIT; |
179 | val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0; | ||
180 | } else | 239 | } else |
181 | val |= EDP_PSR_LINK_DISABLE; | 240 | val |= EDP_PSR_LINK_DISABLE; |
182 | 241 | ||
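The new idle_frames initializer in hsw_psr_enable_source encodes exactly the workaround described in its comment: the VBT value plus one, never less than two. A minimal sketch of just that guard; the helper name is illustrative, not a driver symbol.

#include <assert.h>

/* Illustrative helper mirroring the idle_frames guard above: VBT value
 * plus 1 to absorb the possible off-by-one, with a floor of 2 when the
 * VBT provides nothing (0 is treated as "unset"). */
static unsigned int psr_idle_frames(unsigned int vbt_idle_frames)
{
	return vbt_idle_frames ? vbt_idle_frames + 1 : 2;
}

int main(void)
{
	assert(psr_idle_frames(0) == 2);	/* VBT unset -> safe minimum */
	assert(psr_idle_frames(1) == 2);	/* off-by-one absorbed */
	assert(psr_idle_frames(4) == 5);
	return 0;
}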
@@ -231,7 +290,7 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp) | |||
231 | return true; | 290 | return true; |
232 | } | 291 | } |
233 | 292 | ||
234 | static void intel_psr_do_enable(struct intel_dp *intel_dp) | 293 | static void intel_psr_activate(struct intel_dp *intel_dp) |
235 | { | 294 | { |
236 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 295 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
237 | struct drm_device *dev = intel_dig_port->base.base.dev; | 296 | struct drm_device *dev = intel_dig_port->base.base.dev; |
@@ -242,7 +301,14 @@ static void intel_psr_do_enable(struct intel_dp *intel_dp) | |||
242 | lockdep_assert_held(&dev_priv->psr.lock); | 301 | lockdep_assert_held(&dev_priv->psr.lock); |
243 | 302 | ||
244 | /* Enable/Re-enable PSR on the host */ | 303 | /* Enable/Re-enable PSR on the host */ |
245 | intel_psr_enable_source(intel_dp); | 304 | if (HAS_DDI(dev)) |
305 | /* On HSW+, after we enable PSR on the source it will activate it | ||
306 | * as soon as it matches the configured idle_frame count. So | ||
307 | * we actually enable it here, at activation time. | ||
308 | */ | ||
309 | hsw_psr_enable_source(intel_dp); | ||
310 | else | ||
311 | vlv_psr_activate(intel_dp); | ||
246 | 312 | ||
247 | dev_priv->psr.active = true; | 313 | dev_priv->psr.active = true; |
248 | } | 314 | } |
@@ -280,37 +346,67 @@ void intel_psr_enable(struct intel_dp *intel_dp) | |||
280 | 346 | ||
281 | dev_priv->psr.busy_frontbuffer_bits = 0; | 347 | dev_priv->psr.busy_frontbuffer_bits = 0; |
282 | 348 | ||
283 | intel_psr_setup_vsc(intel_dp); | 349 | if (HAS_DDI(dev)) { |
350 | hsw_psr_setup_vsc(intel_dp); | ||
284 | 351 | ||
285 | /* Avoid continuous PSR exit by masking memup and hpd */ | 352 | /* Avoid continuous PSR exit by masking memup and hpd */ |
286 | I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP | | 353 | I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP | |
287 | EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP); | 354 | EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP); |
288 | 355 | ||
289 | /* Enable PSR on the panel */ | 356 | /* Enable PSR on the panel */ |
290 | intel_psr_enable_sink(intel_dp); | 357 | hsw_psr_enable_sink(intel_dp); |
358 | } else { | ||
359 | vlv_psr_setup_vsc(intel_dp); | ||
360 | |||
361 | /* Enable PSR on the panel */ | ||
362 | vlv_psr_enable_sink(intel_dp); | ||
363 | |||
364 | /* On HSW+, enable_source also means going to the PSR entry/active | ||
365 | * state as soon as idle_frame is achieved, and here would be | ||
366 | * too soon. However, on VLV enable_source just enables PSR | ||
367 | * but leaves it in the inactive state. So we can do this prior | ||
368 | * to the active transition, i.e. here. | ||
369 | */ | ||
370 | vlv_psr_enable_source(intel_dp); | ||
371 | } | ||
291 | 372 | ||
292 | dev_priv->psr.enabled = intel_dp; | 373 | dev_priv->psr.enabled = intel_dp; |
293 | unlock: | 374 | unlock: |
294 | mutex_unlock(&dev_priv->psr.lock); | 375 | mutex_unlock(&dev_priv->psr.lock); |
295 | } | 376 | } |
296 | 377 | ||
297 | /** | 378 | static void vlv_psr_disable(struct intel_dp *intel_dp) |
298 | * intel_psr_disable - Disable PSR | ||
299 | * @intel_dp: Intel DP | ||
300 | * | ||
301 | * This function needs to be called before disabling pipe. | ||
302 | */ | ||
303 | void intel_psr_disable(struct intel_dp *intel_dp) | ||
304 | { | 379 | { |
305 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 380 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
306 | struct drm_device *dev = intel_dig_port->base.base.dev; | 381 | struct drm_device *dev = intel_dig_port->base.base.dev; |
307 | struct drm_i915_private *dev_priv = dev->dev_private; | 382 | struct drm_i915_private *dev_priv = dev->dev_private; |
383 | struct intel_crtc *intel_crtc = | ||
384 | to_intel_crtc(intel_dig_port->base.base.crtc); | ||
385 | uint32_t val; | ||
308 | 386 | ||
309 | mutex_lock(&dev_priv->psr.lock); | 387 | if (dev_priv->psr.active) { |
310 | if (!dev_priv->psr.enabled) { | 388 | /* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */ |
311 | mutex_unlock(&dev_priv->psr.lock); | 389 | if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) & |
312 | return; | 390 | VLV_EDP_PSR_IN_TRANS) == 0, 1)) |
391 | WARN(1, "PSR transition took longer than expected\n"); | ||
392 | |||
393 | val = I915_READ(VLV_PSRCTL(intel_crtc->pipe)); | ||
394 | val &= ~VLV_EDP_PSR_ACTIVE_ENTRY; | ||
395 | val &= ~VLV_EDP_PSR_ENABLE; | ||
396 | val &= ~VLV_EDP_PSR_MODE_MASK; | ||
397 | I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val); | ||
398 | |||
399 | dev_priv->psr.active = false; | ||
400 | } else { | ||
401 | WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe)); | ||
313 | } | 402 | } |
403 | } | ||
404 | |||
405 | static void hsw_psr_disable(struct intel_dp *intel_dp) | ||
406 | { | ||
407 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | ||
408 | struct drm_device *dev = intel_dig_port->base.base.dev; | ||
409 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
314 | 410 | ||
315 | if (dev_priv->psr.active) { | 411 | if (dev_priv->psr.active) { |
316 | I915_WRITE(EDP_PSR_CTL(dev), | 412 | I915_WRITE(EDP_PSR_CTL(dev), |
@@ -325,6 +421,30 @@ void intel_psr_disable(struct intel_dp *intel_dp) | |||
325 | } else { | 421 | } else { |
326 | WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE); | 422 | WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE); |
327 | } | 423 | } |
424 | } | ||
425 | |||
426 | /** | ||
427 | * intel_psr_disable - Disable PSR | ||
428 | * @intel_dp: Intel DP | ||
429 | * | ||
430 | * This function needs to be called before disabling pipe. | ||
431 | */ | ||
432 | void intel_psr_disable(struct intel_dp *intel_dp) | ||
433 | { | ||
434 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | ||
435 | struct drm_device *dev = intel_dig_port->base.base.dev; | ||
436 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
437 | |||
438 | mutex_lock(&dev_priv->psr.lock); | ||
439 | if (!dev_priv->psr.enabled) { | ||
440 | mutex_unlock(&dev_priv->psr.lock); | ||
441 | return; | ||
442 | } | ||
443 | |||
444 | if (HAS_DDI(dev)) | ||
445 | hsw_psr_disable(intel_dp); | ||
446 | else | ||
447 | vlv_psr_disable(intel_dp); | ||
328 | 448 | ||
329 | dev_priv->psr.enabled = NULL; | 449 | dev_priv->psr.enabled = NULL; |
330 | mutex_unlock(&dev_priv->psr.lock); | 450 | mutex_unlock(&dev_priv->psr.lock); |
@@ -337,18 +457,27 @@ static void intel_psr_work(struct work_struct *work) | |||
337 | struct drm_i915_private *dev_priv = | 457 | struct drm_i915_private *dev_priv = |
338 | container_of(work, typeof(*dev_priv), psr.work.work); | 458 | container_of(work, typeof(*dev_priv), psr.work.work); |
339 | struct intel_dp *intel_dp = dev_priv->psr.enabled; | 459 | struct intel_dp *intel_dp = dev_priv->psr.enabled; |
460 | struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; | ||
461 | enum pipe pipe = to_intel_crtc(crtc)->pipe; | ||
340 | 462 | ||
341 | /* We have to make sure PSR is ready for re-enable | 463 | /* We have to make sure PSR is ready for re-enable |
342 | * otherwise it stays disabled until the next full enable/disable cycle. | 464 | * otherwise it stays disabled until the next full enable/disable cycle. |
343 | * PSR might take some time to get fully disabled | 465 | * PSR might take some time to get fully disabled |
344 | * and be ready for re-enable. | 466 | * and be ready for re-enable. |
345 | */ | 467 | */ |
346 | if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) & | 468 | if (HAS_DDI(dev_priv->dev)) { |
347 | EDP_PSR_STATUS_STATE_MASK) == 0, 50)) { | 469 | if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) & |
348 | DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); | 470 | EDP_PSR_STATUS_STATE_MASK) == 0, 50)) { |
349 | return; | 471 | DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); |
472 | return; | ||
473 | } | ||
474 | } else { | ||
475 | if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) & | ||
476 | VLV_EDP_PSR_IN_TRANS) == 0, 1)) { | ||
477 | DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); | ||
478 | return; | ||
479 | } | ||
350 | } | 480 | } |
351 | |||
352 | mutex_lock(&dev_priv->psr.lock); | 481 | mutex_lock(&dev_priv->psr.lock); |
353 | intel_dp = dev_priv->psr.enabled; | 482 | intel_dp = dev_priv->psr.enabled; |
354 | 483 | ||
@@ -363,7 +492,7 @@ static void intel_psr_work(struct work_struct *work) | |||
363 | if (dev_priv->psr.busy_frontbuffer_bits) | 492 | if (dev_priv->psr.busy_frontbuffer_bits) |
364 | goto unlock; | 493 | goto unlock; |
365 | 494 | ||
366 | intel_psr_do_enable(intel_dp); | 495 | intel_psr_activate(intel_dp); |
367 | unlock: | 496 | unlock: |
368 | mutex_unlock(&dev_priv->psr.lock); | 497 | mutex_unlock(&dev_priv->psr.lock); |
369 | } | 498 | } |
@@ -371,17 +500,47 @@ unlock: | |||
371 | static void intel_psr_exit(struct drm_device *dev) | 500 | static void intel_psr_exit(struct drm_device *dev) |
372 | { | 501 | { |
373 | struct drm_i915_private *dev_priv = dev->dev_private; | 502 | struct drm_i915_private *dev_priv = dev->dev_private; |
503 | struct intel_dp *intel_dp = dev_priv->psr.enabled; | ||
504 | struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; | ||
505 | enum pipe pipe = to_intel_crtc(crtc)->pipe; | ||
506 | u32 val; | ||
374 | 507 | ||
375 | if (dev_priv->psr.active) { | 508 | if (!dev_priv->psr.active) |
376 | u32 val = I915_READ(EDP_PSR_CTL(dev)); | 509 | return; |
510 | |||
511 | if (HAS_DDI(dev)) { | ||
512 | val = I915_READ(EDP_PSR_CTL(dev)); | ||
377 | 513 | ||
378 | WARN_ON(!(val & EDP_PSR_ENABLE)); | 514 | WARN_ON(!(val & EDP_PSR_ENABLE)); |
379 | 515 | ||
380 | I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE); | 516 | I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE); |
381 | 517 | ||
382 | dev_priv->psr.active = false; | 518 | dev_priv->psr.active = false; |
519 | } else { | ||
520 | val = I915_READ(VLV_PSRCTL(pipe)); | ||
521 | |||
522 | /* Here we do the transition from PSR_state 3 to PSR_state 5 | ||
523 | * directly, since PSR_state 4, that is active with single frame | ||
524 | * update, can be skipped. From PSR_state 5, that is PSR exit, the | ||
525 | * hardware is then responsible for transitioning back to PSR_state 1, | ||
526 | * that is PSR inactive - the same state as after | ||
527 | * vlv_psr_enable_source. | ||
528 | */ | ||
529 | val &= ~VLV_EDP_PSR_ACTIVE_ENTRY; | ||
530 | I915_WRITE(VLV_PSRCTL(pipe), val); | ||
531 | |||
532 | /* Send AUX wake up - the spec says that after transitioning to PSR | ||
533 | * active we have to send an AUX wake up by writing 01h to DPCD | ||
534 | * 600h of the sink device. | ||
535 | * XXX: This might slow down the transition, but without this | ||
536 | * HW doesn't complete the transition to PSR_state 1 and we | ||
537 | * never get the screen updated. | ||
538 | */ | ||
539 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, | ||
540 | DP_SET_POWER_D0); | ||
383 | } | 541 | } |
384 | 542 | ||
543 | dev_priv->psr.active = false; | ||
385 | } | 544 | } |
386 | 545 | ||
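The comments in vlv_psr_enable_source, vlv_psr_activate and intel_psr_exit describe the VLV PSR flow as numbered states. Collected in one place as an editorial sketch - this enum does not exist in the driver:

/* Editorial summary of the PSR states named in the comments above. */
enum vlv_psr_state {
	VLV_PSR_STATE_0_DISABLED = 0,	/* PSR disabled */
	VLV_PSR_STATE_1_INACTIVE,	/* enabled but inactive (enable_source) */
	VLV_PSR_STATE_2_TRANSITION,	/* entering active, static frame tx */
	VLV_PSR_STATE_3_ACTIVE_NORFB,	/* active, no RFB updates */
	VLV_PSR_STATE_4_ACTIVE_SF,	/* active w/ single frame update (skipped) */
	VLV_PSR_STATE_5_EXIT,		/* exiting; HW returns to state 1 */
};

/* Software drives 0->1 (vlv_psr_enable_source), 1->2 (vlv_psr_activate)
 * and 3->5 (intel_psr_exit); the hardware handles 2->3 and 5->1. */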
387 | /** | 546 | /** |
@@ -459,6 +618,17 @@ void intel_psr_flush(struct drm_device *dev, | |||
459 | (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe))) | 618 | (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe))) |
460 | intel_psr_exit(dev); | 619 | intel_psr_exit(dev); |
461 | 620 | ||
621 | /* | ||
622 | * On Valleyview and Cherryview we don't use hardware tracking, so | ||
623 | * sprite plane updates or cursor moves don't result in a PSR | ||
624 | * invalidate. This means we need to manually fake this in | ||
625 | * software for all flushes, not just when we've seen a preceding | ||
626 | * invalidation through frontbuffer rendering. */ | ||
627 | if (!HAS_DDI(dev) && | ||
628 | ((frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)) || | ||
629 | (frontbuffer_bits & INTEL_FRONTBUFFER_CURSOR(pipe)))) | ||
630 | intel_psr_exit(dev); | ||
631 | |||
462 | if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) | 632 | if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) |
463 | schedule_delayed_work(&dev_priv->psr.work, | 633 | schedule_delayed_work(&dev_priv->psr.work, |
464 | msecs_to_jiffies(100)); | 634 | msecs_to_jiffies(100)); |
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/intel_renderstate_gen6.c index 56c1429d8a60..11c8e7b3dd7c 100644 --- a/drivers/gpu/drm/i915/intel_renderstate_gen6.c +++ b/drivers/gpu/drm/i915/intel_renderstate_gen6.c | |||
@@ -1,3 +1,28 @@ | |||
1 | /* | ||
2 | * Copyright © 2014 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Generated by: intel-gpu-tools-1.8-220-g01153e7 | ||
24 | */ | ||
25 | |||
1 | #include "intel_renderstate.h" | 26 | #include "intel_renderstate.h" |
2 | 27 | ||
3 | static const u32 gen6_null_state_relocs[] = { | 28 | static const u32 gen6_null_state_relocs[] = { |
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/intel_renderstate_gen7.c index 419e35a7b0ff..655180646152 100644 --- a/drivers/gpu/drm/i915/intel_renderstate_gen7.c +++ b/drivers/gpu/drm/i915/intel_renderstate_gen7.c | |||
@@ -1,3 +1,28 @@ | |||
1 | /* | ||
2 | * Copyright © 2014 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Generated by: intel-gpu-tools-1.8-220-g01153e7 | ||
24 | */ | ||
25 | |||
1 | #include "intel_renderstate.h" | 26 | #include "intel_renderstate.h" |
2 | 27 | ||
3 | static const u32 gen7_null_state_relocs[] = { | 28 | static const u32 gen7_null_state_relocs[] = { |
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c index 78011d73fa9f..95288a34c15d 100644 --- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c +++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c | |||
@@ -1,3 +1,28 @@ | |||
1 | /* | ||
2 | * Copyright © 2014 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Generated by: intel-gpu-tools-1.8-220-g01153e7 | ||
24 | */ | ||
25 | |||
1 | #include "intel_renderstate.h" | 26 | #include "intel_renderstate.h" |
2 | 27 | ||
3 | static const u32 gen8_null_state_relocs[] = { | 28 | static const u32 gen8_null_state_relocs[] = { |
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen9.c b/drivers/gpu/drm/i915/intel_renderstate_gen9.c index 875075373807..16a7ec273bd9 100644 --- a/drivers/gpu/drm/i915/intel_renderstate_gen9.c +++ b/drivers/gpu/drm/i915/intel_renderstate_gen9.c | |||
@@ -1,3 +1,28 @@ | |||
1 | /* | ||
2 | * Copyright © 2014 Intel Corporation | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Generated by: intel-gpu-tools-1.8-220-g01153e7 | ||
24 | */ | ||
25 | |||
1 | #include "intel_renderstate.h" | 26 | #include "intel_renderstate.h" |
2 | 27 | ||
3 | static const u32 gen9_null_state_relocs[] = { | 28 | static const u32 gen9_null_state_relocs[] = { |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index c7bc93d28d84..12a36f0ca53d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -52,16 +52,27 @@ intel_ring_initialized(struct intel_engine_cs *ring) | |||
52 | 52 | ||
53 | int __intel_ring_space(int head, int tail, int size) | 53 | int __intel_ring_space(int head, int tail, int size) |
54 | { | 54 | { |
55 | int space = head - (tail + I915_RING_FREE_SPACE); | 55 | int space = head - tail; |
56 | if (space < 0) | 56 | if (space <= 0) |
57 | space += size; | 57 | space += size; |
58 | return space; | 58 | return space - I915_RING_FREE_SPACE; |
59 | } | ||
60 | |||
61 | void intel_ring_update_space(struct intel_ringbuffer *ringbuf) | ||
62 | { | ||
63 | if (ringbuf->last_retired_head != -1) { | ||
64 | ringbuf->head = ringbuf->last_retired_head; | ||
65 | ringbuf->last_retired_head = -1; | ||
66 | } | ||
67 | |||
68 | ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR, | ||
69 | ringbuf->tail, ringbuf->size); | ||
59 | } | 70 | } |
60 | 71 | ||
61 | int intel_ring_space(struct intel_ringbuffer *ringbuf) | 72 | int intel_ring_space(struct intel_ringbuffer *ringbuf) |
62 | { | 73 | { |
63 | return __intel_ring_space(ringbuf->head & HEAD_ADDR, | 74 | intel_ring_update_space(ringbuf); |
64 | ringbuf->tail, ringbuf->size); | 75 | return ringbuf->space; |
65 | } | 76 | } |
66 | 77 | ||
67 | bool intel_ring_stopped(struct intel_engine_cs *ring) | 78 | bool intel_ring_stopped(struct intel_engine_cs *ring) |
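The rewritten __intel_ring_space wraps head - tail first and only then subtracts the I915_RING_FREE_SPACE reservation, with <= 0 so that head == tail still reads as an empty ring. A standalone re-implementation to sanity-check the arithmetic; the reservation size here is illustrative, the real constant lives in intel_ringbuffer.h.

#include <assert.h>
#include <stdio.h>

#define RING_FREE_SPACE 64	/* stands in for I915_RING_FREE_SPACE */

/* Old calculation: the reservation sits inside the wrap test. */
static int ring_space_old(int head, int tail, int size)
{
	int space = head - (tail + RING_FREE_SPACE);
	if (space < 0)
		space += size;
	return space;
}

/* New calculation: wrap first, subtract the reservation afterwards. */
static int ring_space_new(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space - RING_FREE_SPACE;
}

int main(void)
{
	/* Empty ring (head == tail): both report size minus reservation. */
	assert(ring_space_old(0, 0, 4096) == 4096 - RING_FREE_SPACE);
	assert(ring_space_new(0, 0, 4096) == 4096 - RING_FREE_SPACE);

	/* Nearly full ring, head only 16 bytes ahead of tail: the old code
	 * wrapped into a nearly-empty answer, the new one goes negative,
	 * i.e. "not enough room" for any caller checking space >= n. */
	printf("old: %d, new: %d\n",
	       ring_space_old(16, 0, 4096),	/* 4048 - looks almost empty */
	       ring_space_new(16, 0, 4096));	/* -48  - correctly no room */
	return 0;
}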
@@ -592,10 +603,10 @@ static int init_ring_common(struct intel_engine_cs *ring) | |||
592 | goto out; | 603 | goto out; |
593 | } | 604 | } |
594 | 605 | ||
606 | ringbuf->last_retired_head = -1; | ||
595 | ringbuf->head = I915_READ_HEAD(ring); | 607 | ringbuf->head = I915_READ_HEAD(ring); |
596 | ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR; | 608 | ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR; |
597 | ringbuf->space = intel_ring_space(ringbuf); | 609 | intel_ring_update_space(ringbuf); |
598 | ringbuf->last_retired_head = -1; | ||
599 | 610 | ||
600 | memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); | 611 | memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); |
601 | 612 | ||
@@ -627,8 +638,7 @@ intel_init_pipe_control(struct intel_engine_cs *ring) | |||
627 | { | 638 | { |
628 | int ret; | 639 | int ret; |
629 | 640 | ||
630 | if (ring->scratch.obj) | 641 | WARN_ON(ring->scratch.obj); |
631 | return 0; | ||
632 | 642 | ||
633 | ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096); | 643 | ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096); |
634 | if (ring->scratch.obj == NULL) { | 644 | if (ring->scratch.obj == NULL) { |
@@ -672,7 +682,7 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring, | |||
672 | struct drm_i915_private *dev_priv = dev->dev_private; | 682 | struct drm_i915_private *dev_priv = dev->dev_private; |
673 | struct i915_workarounds *w = &dev_priv->workarounds; | 683 | struct i915_workarounds *w = &dev_priv->workarounds; |
674 | 684 | ||
675 | if (WARN_ON(w->count == 0)) | 685 | if (WARN_ON_ONCE(w->count == 0)) |
676 | return 0; | 686 | return 0; |
677 | 687 | ||
678 | ring->gpu_caches_dirty = true; | 688 | ring->gpu_caches_dirty = true; |
@@ -703,6 +713,22 @@ static int intel_ring_workarounds_emit(struct intel_engine_cs *ring, | |||
703 | return 0; | 713 | return 0; |
704 | } | 714 | } |
705 | 715 | ||
716 | static int intel_rcs_ctx_init(struct intel_engine_cs *ring, | ||
717 | struct intel_context *ctx) | ||
718 | { | ||
719 | int ret; | ||
720 | |||
721 | ret = intel_ring_workarounds_emit(ring, ctx); | ||
722 | if (ret != 0) | ||
723 | return ret; | ||
724 | |||
725 | ret = i915_gem_render_state_init(ring); | ||
726 | if (ret) | ||
727 | DRM_ERROR("init render state: %d\n", ret); | ||
728 | |||
729 | return ret; | ||
730 | } | ||
731 | |||
706 | static int wa_add(struct drm_i915_private *dev_priv, | 732 | static int wa_add(struct drm_i915_private *dev_priv, |
707 | const u32 addr, const u32 mask, const u32 val) | 733 | const u32 addr, const u32 mask, const u32 val) |
708 | { | 734 | { |
@@ -762,9 +788,12 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring) | |||
762 | * workaround for a possible hang in the unlikely event a TLB | 788 | * workaround for a possible hang in the unlikely event a TLB |
763 | * invalidation occurs during a PSD flush. | 789 | * invalidation occurs during a PSD flush. |
764 | */ | 790 | */ |
791 | /* WaForceEnableNonCoherent:bdw */ | ||
792 | /* WaHdcDisableFetchWhenMasked:bdw */ | ||
765 | /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */ | 793 | /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */ |
766 | WA_SET_BIT_MASKED(HDC_CHICKEN0, | 794 | WA_SET_BIT_MASKED(HDC_CHICKEN0, |
767 | HDC_FORCE_NON_COHERENT | | 795 | HDC_FORCE_NON_COHERENT | |
796 | HDC_DONOT_FETCH_MEM_WHEN_MASKED | | ||
768 | (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); | 797 | (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); |
769 | 798 | ||
770 | /* Wa4x4STCOptimizationDisable:bdw */ | 799 | /* Wa4x4STCOptimizationDisable:bdw */ |
@@ -861,12 +890,6 @@ static int init_render_ring(struct intel_engine_cs *ring) | |||
861 | _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | | 890 | _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | |
862 | _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); | 891 | _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); |
863 | 892 | ||
864 | if (INTEL_INFO(dev)->gen >= 5) { | ||
865 | ret = intel_init_pipe_control(ring); | ||
866 | if (ret) | ||
867 | return ret; | ||
868 | } | ||
869 | |||
870 | if (IS_GEN6(dev)) { | 893 | if (IS_GEN6(dev)) { |
871 | /* From the Sandybridge PRM, volume 1 part 3, page 24: | 894 | /* From the Sandybridge PRM, volume 1 part 3, page 24: |
872 | * "If this bit is set, STCunit will have LRA as replacement | 895 | * "If this bit is set, STCunit will have LRA as replacement |
@@ -918,17 +941,20 @@ static int gen8_rcs_signal(struct intel_engine_cs *signaller, | |||
918 | return ret; | 941 | return ret; |
919 | 942 | ||
920 | for_each_ring(waiter, dev_priv, i) { | 943 | for_each_ring(waiter, dev_priv, i) { |
944 | u32 seqno; | ||
921 | u64 gtt_offset = signaller->semaphore.signal_ggtt[i]; | 945 | u64 gtt_offset = signaller->semaphore.signal_ggtt[i]; |
922 | if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) | 946 | if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) |
923 | continue; | 947 | continue; |
924 | 948 | ||
949 | seqno = i915_gem_request_get_seqno( | ||
950 | signaller->outstanding_lazy_request); | ||
925 | intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6)); | 951 | intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6)); |
926 | intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB | | 952 | intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB | |
927 | PIPE_CONTROL_QW_WRITE | | 953 | PIPE_CONTROL_QW_WRITE | |
928 | PIPE_CONTROL_FLUSH_ENABLE); | 954 | PIPE_CONTROL_FLUSH_ENABLE); |
929 | intel_ring_emit(signaller, lower_32_bits(gtt_offset)); | 955 | intel_ring_emit(signaller, lower_32_bits(gtt_offset)); |
930 | intel_ring_emit(signaller, upper_32_bits(gtt_offset)); | 956 | intel_ring_emit(signaller, upper_32_bits(gtt_offset)); |
931 | intel_ring_emit(signaller, signaller->outstanding_lazy_seqno); | 957 | intel_ring_emit(signaller, seqno); |
932 | intel_ring_emit(signaller, 0); | 958 | intel_ring_emit(signaller, 0); |
933 | intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | | 959 | intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | |
934 | MI_SEMAPHORE_TARGET(waiter->id)); | 960 | MI_SEMAPHORE_TARGET(waiter->id)); |
@@ -956,16 +982,19 @@ static int gen8_xcs_signal(struct intel_engine_cs *signaller, | |||
956 | return ret; | 982 | return ret; |
957 | 983 | ||
958 | for_each_ring(waiter, dev_priv, i) { | 984 | for_each_ring(waiter, dev_priv, i) { |
985 | u32 seqno; | ||
959 | u64 gtt_offset = signaller->semaphore.signal_ggtt[i]; | 986 | u64 gtt_offset = signaller->semaphore.signal_ggtt[i]; |
960 | if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) | 987 | if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) |
961 | continue; | 988 | continue; |
962 | 989 | ||
990 | seqno = i915_gem_request_get_seqno( | ||
991 | signaller->outstanding_lazy_request); | ||
963 | intel_ring_emit(signaller, (MI_FLUSH_DW + 1) | | 992 | intel_ring_emit(signaller, (MI_FLUSH_DW + 1) | |
964 | MI_FLUSH_DW_OP_STOREDW); | 993 | MI_FLUSH_DW_OP_STOREDW); |
965 | intel_ring_emit(signaller, lower_32_bits(gtt_offset) | | 994 | intel_ring_emit(signaller, lower_32_bits(gtt_offset) | |
966 | MI_FLUSH_DW_USE_GTT); | 995 | MI_FLUSH_DW_USE_GTT); |
967 | intel_ring_emit(signaller, upper_32_bits(gtt_offset)); | 996 | intel_ring_emit(signaller, upper_32_bits(gtt_offset)); |
968 | intel_ring_emit(signaller, signaller->outstanding_lazy_seqno); | 997 | intel_ring_emit(signaller, seqno); |
969 | intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | | 998 | intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | |
970 | MI_SEMAPHORE_TARGET(waiter->id)); | 999 | MI_SEMAPHORE_TARGET(waiter->id)); |
971 | intel_ring_emit(signaller, 0); | 1000 | intel_ring_emit(signaller, 0); |
@@ -994,9 +1023,11 @@ static int gen6_signal(struct intel_engine_cs *signaller, | |||
994 | for_each_ring(useless, dev_priv, i) { | 1023 | for_each_ring(useless, dev_priv, i) { |
995 | u32 mbox_reg = signaller->semaphore.mbox.signal[i]; | 1024 | u32 mbox_reg = signaller->semaphore.mbox.signal[i]; |
996 | if (mbox_reg != GEN6_NOSYNC) { | 1025 | if (mbox_reg != GEN6_NOSYNC) { |
1026 | u32 seqno = i915_gem_request_get_seqno( | ||
1027 | signaller->outstanding_lazy_request); | ||
997 | intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); | 1028 | intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); |
998 | intel_ring_emit(signaller, mbox_reg); | 1029 | intel_ring_emit(signaller, mbox_reg); |
999 | intel_ring_emit(signaller, signaller->outstanding_lazy_seqno); | 1030 | intel_ring_emit(signaller, seqno); |
1000 | } | 1031 | } |
1001 | } | 1032 | } |
1002 | 1033 | ||
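All three signal paths above now fetch the seqno via i915_gem_request_get_seqno() instead of reading outstanding_lazy_seqno directly. The accessor is defined elsewhere in this series; presumably it is a NULL-tolerant read along these lines (a sketch over a reduced struct, not the driver's definition):

#include <stdint.h>

/* Reduced stand-in; the real request struct carries much more state. */
struct drm_i915_gem_request {
	uint32_t seqno;
};

/* Presumed shape of the accessor used by the call sites above: return
 * the request's seqno, or 0 when no request is outstanding. */
static inline uint32_t
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
	return req ? req->seqno : 0;
}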
@@ -1031,7 +1062,8 @@ gen6_add_request(struct intel_engine_cs *ring) | |||
1031 | 1062 | ||
1032 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); | 1063 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); |
1033 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 1064 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
1034 | intel_ring_emit(ring, ring->outstanding_lazy_seqno); | 1065 | intel_ring_emit(ring, |
1066 | i915_gem_request_get_seqno(ring->outstanding_lazy_request)); | ||
1035 | intel_ring_emit(ring, MI_USER_INTERRUPT); | 1067 | intel_ring_emit(ring, MI_USER_INTERRUPT); |
1036 | __intel_ring_advance(ring); | 1068 | __intel_ring_advance(ring); |
1037 | 1069 | ||
@@ -1149,7 +1181,8 @@ pc_render_add_request(struct intel_engine_cs *ring) | |||
1149 | PIPE_CONTROL_WRITE_FLUSH | | 1181 | PIPE_CONTROL_WRITE_FLUSH | |
1150 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); | 1182 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); |
1151 | intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); | 1183 | intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); |
1152 | intel_ring_emit(ring, ring->outstanding_lazy_seqno); | 1184 | intel_ring_emit(ring, |
1185 | i915_gem_request_get_seqno(ring->outstanding_lazy_request)); | ||
1153 | intel_ring_emit(ring, 0); | 1186 | intel_ring_emit(ring, 0); |
1154 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 1187 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
1155 | scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */ | 1188 | scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */ |
@@ -1168,7 +1201,8 @@ pc_render_add_request(struct intel_engine_cs *ring) | |||
1168 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | | 1201 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | |
1169 | PIPE_CONTROL_NOTIFY); | 1202 | PIPE_CONTROL_NOTIFY); |
1170 | intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); | 1203 | intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); |
1171 | intel_ring_emit(ring, ring->outstanding_lazy_seqno); | 1204 | intel_ring_emit(ring, |
1205 | i915_gem_request_get_seqno(ring->outstanding_lazy_request)); | ||
1172 | intel_ring_emit(ring, 0); | 1206 | intel_ring_emit(ring, 0); |
1173 | __intel_ring_advance(ring); | 1207 | __intel_ring_advance(ring); |
1174 | 1208 | ||
@@ -1408,7 +1442,8 @@ i9xx_add_request(struct intel_engine_cs *ring) | |||
1408 | 1442 | ||
1409 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); | 1443 | intel_ring_emit(ring, MI_STORE_DWORD_INDEX); |
1410 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | 1444 | intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); |
1411 | intel_ring_emit(ring, ring->outstanding_lazy_seqno); | 1445 | intel_ring_emit(ring, |
1446 | i915_gem_request_get_seqno(ring->outstanding_lazy_request)); | ||
1412 | intel_ring_emit(ring, MI_USER_INTERRUPT); | 1447 | intel_ring_emit(ring, MI_USER_INTERRUPT); |
1413 | __intel_ring_advance(ring); | 1448 | __intel_ring_advance(ring); |
1414 | 1449 | ||
@@ -1789,15 +1824,15 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev, | |||
1789 | static int intel_init_ring_buffer(struct drm_device *dev, | 1824 | static int intel_init_ring_buffer(struct drm_device *dev, |
1790 | struct intel_engine_cs *ring) | 1825 | struct intel_engine_cs *ring) |
1791 | { | 1826 | { |
1792 | struct intel_ringbuffer *ringbuf = ring->buffer; | 1827 | struct intel_ringbuffer *ringbuf; |
1793 | int ret; | 1828 | int ret; |
1794 | 1829 | ||
1795 | if (ringbuf == NULL) { | 1830 | WARN_ON(ring->buffer); |
1796 | ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL); | 1831 | |
1797 | if (!ringbuf) | 1832 | ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL); |
1798 | return -ENOMEM; | 1833 | if (!ringbuf) |
1799 | ring->buffer = ringbuf; | 1834 | return -ENOMEM; |
1800 | } | 1835 | ring->buffer = ringbuf; |
1801 | 1836 | ||
1802 | ring->dev = dev; | 1837 | ring->dev = dev; |
1803 | INIT_LIST_HEAD(&ring->active_list); | 1838 | INIT_LIST_HEAD(&ring->active_list); |
@@ -1820,21 +1855,21 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
1820 | goto error; | 1855 | goto error; |
1821 | } | 1856 | } |
1822 | 1857 | ||
1823 | if (ringbuf->obj == NULL) { | 1858 | WARN_ON(ringbuf->obj); |
1824 | ret = intel_alloc_ringbuffer_obj(dev, ringbuf); | ||
1825 | if (ret) { | ||
1826 | DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", | ||
1827 | ring->name, ret); | ||
1828 | goto error; | ||
1829 | } | ||
1830 | 1859 | ||
1831 | ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); | 1860 | ret = intel_alloc_ringbuffer_obj(dev, ringbuf); |
1832 | if (ret) { | 1861 | if (ret) { |
1833 | DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", | 1862 | DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", |
1834 | ring->name, ret); | 1863 | ring->name, ret); |
1835 | intel_destroy_ringbuffer_obj(ringbuf); | 1864 | goto error; |
1836 | goto error; | 1865 | } |
1837 | } | 1866 | |
1867 | ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); | ||
1868 | if (ret) { | ||
1869 | DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", | ||
1870 | ring->name, ret); | ||
1871 | intel_destroy_ringbuffer_obj(ringbuf); | ||
1872 | goto error; | ||
1838 | } | 1873 | } |
1839 | 1874 | ||
1840 | /* Workaround an erratum on the i830 which causes a hang if | 1875 | /* Workaround an erratum on the i830 which causes a hang if |
@@ -1849,10 +1884,6 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
1849 | if (ret) | 1884 | if (ret) |
1850 | goto error; | 1885 | goto error; |
1851 | 1886 | ||
1852 | ret = ring->init(ring); | ||
1853 | if (ret) | ||
1854 | goto error; | ||
1855 | |||
1856 | return 0; | 1887 | return 0; |
1857 | 1888 | ||
1858 | error: | 1889 | error: |
@@ -1877,8 +1908,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) | |||
1877 | 1908 | ||
1878 | intel_unpin_ringbuffer_obj(ringbuf); | 1909 | intel_unpin_ringbuffer_obj(ringbuf); |
1879 | intel_destroy_ringbuffer_obj(ringbuf); | 1910 | intel_destroy_ringbuffer_obj(ringbuf); |
1880 | ring->preallocated_lazy_request = NULL; | 1911 | i915_gem_request_assign(&ring->outstanding_lazy_request, NULL); |
1881 | ring->outstanding_lazy_seqno = 0; | ||
1882 | 1912 | ||
1883 | if (ring->cleanup) | 1913 | if (ring->cleanup) |
1884 | ring->cleanup(ring); | 1914 | ring->cleanup(ring); |
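intel_cleanup_ring_buffer now drops the lazy request through i915_gem_request_assign(..., NULL) instead of clearing two fields by hand. The helper itself is not shown in this section; the usual shape of such a reference-swapping assign, sketched with self-contained stand-ins, would be:

/* Minimal stand-ins so the sketch compiles on its own; in the driver the
 * request is kref-counted and these map onto kref get/put. */
struct drm_i915_gem_request { int ref; };

static void i915_gem_request_reference(struct drm_i915_gem_request *req)
{
	req->ref++;
}

static void i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
	if (--req->ref == 0) {
		/* last reference: the request would be freed here */
	}
}

/* Presumed shape of the helper used above: reference the new request,
 * unreference the old one, store the new pointer. Assigning NULL, as in
 * intel_cleanup_ring_buffer, therefore just drops the ring's reference. */
static void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
				    struct drm_i915_gem_request *src)
{
	if (src)
		i915_gem_request_reference(src);
	if (*pdst)
		i915_gem_request_unreference(*pdst);
	*pdst = src;
}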
@@ -1895,38 +1925,27 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n) | |||
1895 | { | 1925 | { |
1896 | struct intel_ringbuffer *ringbuf = ring->buffer; | 1926 | struct intel_ringbuffer *ringbuf = ring->buffer; |
1897 | struct drm_i915_gem_request *request; | 1927 | struct drm_i915_gem_request *request; |
1898 | u32 seqno = 0; | ||
1899 | int ret; | 1928 | int ret; |
1900 | 1929 | ||
1901 | if (ringbuf->last_retired_head != -1) { | 1930 | if (intel_ring_space(ringbuf) >= n) |
1902 | ringbuf->head = ringbuf->last_retired_head; | 1931 | return 0; |
1903 | ringbuf->last_retired_head = -1; | ||
1904 | |||
1905 | ringbuf->space = intel_ring_space(ringbuf); | ||
1906 | if (ringbuf->space >= n) | ||
1907 | return 0; | ||
1908 | } | ||
1909 | 1932 | ||
1910 | list_for_each_entry(request, &ring->request_list, list) { | 1933 | list_for_each_entry(request, &ring->request_list, list) { |
1911 | if (__intel_ring_space(request->tail, ringbuf->tail, | 1934 | if (__intel_ring_space(request->tail, ringbuf->tail, |
1912 | ringbuf->size) >= n) { | 1935 | ringbuf->size) >= n) { |
1913 | seqno = request->seqno; | ||
1914 | break; | 1936 | break; |
1915 | } | 1937 | } |
1916 | } | 1938 | } |
1917 | 1939 | ||
1918 | if (seqno == 0) | 1940 | if (&request->list == &ring->request_list) |
1919 | return -ENOSPC; | 1941 | return -ENOSPC; |
1920 | 1942 | ||
1921 | ret = i915_wait_seqno(ring, seqno); | 1943 | ret = i915_wait_request(request); |
1922 | if (ret) | 1944 | if (ret) |
1923 | return ret; | 1945 | return ret; |
1924 | 1946 | ||
1925 | i915_gem_retire_requests_ring(ring); | 1947 | i915_gem_retire_requests_ring(ring); |
1926 | ringbuf->head = ringbuf->last_retired_head; | ||
1927 | ringbuf->last_retired_head = -1; | ||
1928 | 1948 | ||
1929 | ringbuf->space = intel_ring_space(ringbuf); | ||
1930 | return 0; | 1949 | return 0; |
1931 | } | 1950 | } |
1932 | 1951 | ||
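The rewritten intel_ring_wait_request drops the seqno sentinel and instead detects the not-found case with &request->list == &ring->request_list: when list_for_each_entry runs to completion, the cursor's embedded list_head ends up aliasing the list head itself. A self-contained illustration of that idiom; the type-passing macro stands in for the kernel's typeof-based one.

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_for_each_entry(pos, head, type, member)		\
	for (pos = container_of((head)->next, type, member);	\
	     &pos->member != (head);				\
	     pos = container_of(pos->member.next, type, member))

struct request {
	int space;
	struct list_head list;
};

int main(void)
{
	struct list_head head = { &head, &head };
	struct request small = { .space = 10 };
	struct request *req;

	/* Link one request that frees too little space. */
	small.list.prev = &head;
	small.list.next = &head;
	head.next = head.prev = &small.list;

	list_for_each_entry(req, &head, struct request, list)
		if (req->space >= 100)
			break;

	/* The loop ran off the end: the cursor aliases the list head. */
	if (&req->list == &head)
		printf("no suitable request found (-ENOSPC path)\n");
	return 0;
}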
@@ -1952,14 +1971,14 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n) | |||
1952 | * case by choosing an insanely large timeout. */ | 1971 | * case by choosing an insanely large timeout. */ |
1953 | end = jiffies + 60 * HZ; | 1972 | end = jiffies + 60 * HZ; |
1954 | 1973 | ||
1974 | ret = 0; | ||
1955 | trace_i915_ring_wait_begin(ring); | 1975 | trace_i915_ring_wait_begin(ring); |
1956 | do { | 1976 | do { |
1977 | if (intel_ring_space(ringbuf) >= n) | ||
1978 | break; | ||
1957 | ringbuf->head = I915_READ_HEAD(ring); | 1979 | ringbuf->head = I915_READ_HEAD(ring); |
1958 | ringbuf->space = intel_ring_space(ringbuf); | 1980 | if (intel_ring_space(ringbuf) >= n) |
1959 | if (ringbuf->space >= n) { | ||
1960 | ret = 0; | ||
1961 | break; | 1981 | break; |
1962 | } | ||
1963 | 1982 | ||
1964 | msleep(1); | 1983 | msleep(1); |
1965 | 1984 | ||
@@ -2000,19 +2019,19 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring) | |||
2000 | iowrite32(MI_NOOP, virt++); | 2019 | iowrite32(MI_NOOP, virt++); |
2001 | 2020 | ||
2002 | ringbuf->tail = 0; | 2021 | ringbuf->tail = 0; |
2003 | ringbuf->space = intel_ring_space(ringbuf); | 2022 | intel_ring_update_space(ringbuf); |
2004 | 2023 | ||
2005 | return 0; | 2024 | return 0; |
2006 | } | 2025 | } |
2007 | 2026 | ||
2008 | int intel_ring_idle(struct intel_engine_cs *ring) | 2027 | int intel_ring_idle(struct intel_engine_cs *ring) |
2009 | { | 2028 | { |
2010 | u32 seqno; | 2029 | struct drm_i915_gem_request *req; |
2011 | int ret; | 2030 | int ret; |
2012 | 2031 | ||
2013 | /* We need to add any requests required to flush the objects and ring */ | 2032 | /* We need to add any requests required to flush the objects and ring */ |
2014 | if (ring->outstanding_lazy_seqno) { | 2033 | if (ring->outstanding_lazy_request) { |
2015 | ret = i915_add_request(ring, NULL); | 2034 | ret = i915_add_request(ring); |
2016 | if (ret) | 2035 | if (ret) |
2017 | return ret; | 2036 | return ret; |
2018 | } | 2037 | } |
@@ -2021,30 +2040,39 @@ int intel_ring_idle(struct intel_engine_cs *ring) | |||
2021 | if (list_empty(&ring->request_list)) | 2040 | if (list_empty(&ring->request_list)) |
2022 | return 0; | 2041 | return 0; |
2023 | 2042 | ||
2024 | seqno = list_entry(ring->request_list.prev, | 2043 | req = list_entry(ring->request_list.prev, |
2025 | struct drm_i915_gem_request, | 2044 | struct drm_i915_gem_request, |
2026 | list)->seqno; | 2045 | list); |
2027 | 2046 | ||
2028 | return i915_wait_seqno(ring, seqno); | 2047 | return i915_wait_request(req); |
2029 | } | 2048 | } |
2030 | 2049 | ||
2031 | static int | 2050 | static int |
2032 | intel_ring_alloc_seqno(struct intel_engine_cs *ring) | 2051 | intel_ring_alloc_request(struct intel_engine_cs *ring) |
2033 | { | 2052 | { |
2034 | if (ring->outstanding_lazy_seqno) | 2053 | int ret; |
2054 | struct drm_i915_gem_request *request; | ||
2055 | struct drm_i915_private *dev_private = ring->dev->dev_private; | ||
2056 | |||
2057 | if (ring->outstanding_lazy_request) | ||
2035 | return 0; | 2058 | return 0; |
2036 | 2059 | ||
2037 | if (ring->preallocated_lazy_request == NULL) { | 2060 | request = kzalloc(sizeof(*request), GFP_KERNEL); |
2038 | struct drm_i915_gem_request *request; | 2061 | if (request == NULL) |
2062 | return -ENOMEM; | ||
2039 | 2063 | ||
2040 | request = kmalloc(sizeof(*request), GFP_KERNEL); | 2064 | kref_init(&request->ref); |
2041 | if (request == NULL) | 2065 | request->ring = ring; |
2042 | return -ENOMEM; | 2066 | request->uniq = dev_private->request_uniq++; |
2043 | 2067 | ||
2044 | ring->preallocated_lazy_request = request; | 2068 | ret = i915_gem_get_seqno(ring->dev, &request->seqno); |
2069 | if (ret) { | ||
2070 | kfree(request); | ||
2071 | return ret; | ||
2045 | } | 2072 | } |
2046 | 2073 | ||
2047 | return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno); | 2074 | ring->outstanding_lazy_request = request; |
2075 | return 0; | ||
2048 | } | 2076 | } |
2049 | 2077 | ||
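intel_ring_alloc_request upgrades the old preallocated-seqno scheme to a full refcounted request object: reuse the cached outstanding_lazy_request if one exists, otherwise allocate, kref-init and stamp a fresh one. The same control flow, reduced to a standalone sketch; the seqno and uniq sources are faked and the structs are pared down to what the hunk touches.

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

struct request {
	int ref;		/* kref in the driver */
	uint32_t seqno;
	uint32_t uniq;
	void *ring;
};

struct ring {
	struct request *outstanding_lazy_request;
	uint32_t next_seqno;	/* stands in for i915_gem_get_seqno() */
	uint32_t request_uniq;	/* lives in dev_priv in the driver */
};

/* Mirrors intel_ring_alloc_request above: at most one lazy request is
 * outstanding per ring; it is created on first use and reused until
 * something submits it. */
static int ring_alloc_request(struct ring *ring)
{
	struct request *request;

	if (ring->outstanding_lazy_request)
		return 0;

	request = calloc(1, sizeof(*request));	/* kzalloc() */
	if (!request)
		return -ENOMEM;

	request->ref = 1;			/* kref_init() */
	request->ring = ring;
	request->uniq = ring->request_uniq++;
	request->seqno = ring->next_seqno++;

	ring->outstanding_lazy_request = request;
	return 0;
}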
2050 | static int __intel_ring_prepare(struct intel_engine_cs *ring, | 2078 | static int __intel_ring_prepare(struct intel_engine_cs *ring, |
@@ -2084,7 +2112,7 @@ int intel_ring_begin(struct intel_engine_cs *ring, | |||
2084 | return ret; | 2112 | return ret; |
2085 | 2113 | ||
2086 | /* Preallocate the olr before touching the ring */ | 2114 | /* Preallocate the olr before touching the ring */ |
2087 | ret = intel_ring_alloc_seqno(ring); | 2115 | ret = intel_ring_alloc_request(ring); |
2088 | if (ret) | 2116 | if (ret) |
2089 | return ret; | 2117 | return ret; |
2090 | 2118 | ||
@@ -2119,7 +2147,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno) | |||
2119 | struct drm_device *dev = ring->dev; | 2147 | struct drm_device *dev = ring->dev; |
2120 | struct drm_i915_private *dev_priv = dev->dev_private; | 2148 | struct drm_i915_private *dev_priv = dev->dev_private; |
2121 | 2149 | ||
2122 | BUG_ON(ring->outstanding_lazy_seqno); | 2150 | BUG_ON(ring->outstanding_lazy_request); |
2123 | 2151 | ||
2124 | if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) { | 2152 | if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) { |
2125 | I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); | 2153 | I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); |
@@ -2341,7 +2369,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) | |||
2341 | } | 2369 | } |
2342 | } | 2370 | } |
2343 | 2371 | ||
2344 | ring->init_context = intel_ring_workarounds_emit; | 2372 | ring->init_context = intel_rcs_ctx_init; |
2345 | ring->add_request = gen6_add_request; | 2373 | ring->add_request = gen6_add_request; |
2346 | ring->flush = gen8_render_ring_flush; | 2374 | ring->flush = gen8_render_ring_flush; |
2347 | ring->irq_get = gen8_ring_get_irq; | 2375 | ring->irq_get = gen8_ring_get_irq; |
@@ -2426,7 +2454,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) | |||
2426 | ring->dispatch_execbuffer = i830_dispatch_execbuffer; | 2454 | ring->dispatch_execbuffer = i830_dispatch_execbuffer; |
2427 | else | 2455 | else |
2428 | ring->dispatch_execbuffer = i915_dispatch_execbuffer; | 2456 | ring->dispatch_execbuffer = i915_dispatch_execbuffer; |
2429 | ring->init = init_render_ring; | 2457 | ring->init_hw = init_render_ring; |
2430 | ring->cleanup = render_ring_cleanup; | 2458 | ring->cleanup = render_ring_cleanup; |
2431 | 2459 | ||
2432 | /* Workaround batchbuffer to combat CS tlb bug. */ | 2460 | /* Workaround batchbuffer to combat CS tlb bug. */ |
@@ -2448,7 +2476,17 @@ int intel_init_render_ring_buffer(struct drm_device *dev) | |||
2448 | ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); | 2476 | ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); |
2449 | } | 2477 | } |
2450 | 2478 | ||
2451 | return intel_init_ring_buffer(dev, ring); | 2479 | ret = intel_init_ring_buffer(dev, ring); |
2480 | if (ret) | ||
2481 | return ret; | ||
2482 | |||
2483 | if (INTEL_INFO(dev)->gen >= 5) { | ||
2484 | ret = intel_init_pipe_control(ring); | ||
2485 | if (ret) | ||
2486 | return ret; | ||
2487 | } | ||
2488 | |||
2489 | return 0; | ||
2452 | } | 2490 | } |
2453 | 2491 | ||
2454 | int intel_init_bsd_ring_buffer(struct drm_device *dev) | 2492 | int intel_init_bsd_ring_buffer(struct drm_device *dev) |
@@ -2519,7 +2557,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) | |||
2519 | } | 2557 | } |
2520 | ring->dispatch_execbuffer = i965_dispatch_execbuffer; | 2558 | ring->dispatch_execbuffer = i965_dispatch_execbuffer; |
2521 | } | 2559 | } |
2522 | ring->init = init_ring_common; | 2560 | ring->init_hw = init_ring_common; |
2523 | 2561 | ||
2524 | return intel_init_ring_buffer(dev, ring); | 2562 | return intel_init_ring_buffer(dev, ring); |
2525 | } | 2563 | } |
@@ -2558,7 +2596,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev) | |||
2558 | ring->semaphore.signal = gen8_xcs_signal; | 2596 | ring->semaphore.signal = gen8_xcs_signal; |
2559 | GEN8_RING_SEMAPHORE_INIT; | 2597 | GEN8_RING_SEMAPHORE_INIT; |
2560 | } | 2598 | } |
2561 | ring->init = init_ring_common; | 2599 | ring->init_hw = init_ring_common; |
2562 | 2600 | ||
2563 | return intel_init_ring_buffer(dev, ring); | 2601 | return intel_init_ring_buffer(dev, ring); |
2564 | } | 2602 | } |
@@ -2615,7 +2653,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) | |||
2615 | ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; | 2653 | ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; |
2616 | } | 2654 | } |
2617 | } | 2655 | } |
2618 | ring->init = init_ring_common; | 2656 | ring->init_hw = init_ring_common; |
2619 | 2657 | ||
2620 | return intel_init_ring_buffer(dev, ring); | 2658 | return intel_init_ring_buffer(dev, ring); |
2621 | } | 2659 | } |
@@ -2666,7 +2704,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) | |||
2666 | ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; | 2704 | ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; |
2667 | } | 2705 | } |
2668 | } | 2706 | } |
2669 | ring->init = init_ring_common; | 2707 | ring->init_hw = init_ring_common; |
2670 | 2708 | ||
2671 | return intel_init_ring_buffer(dev, ring); | 2709 | return intel_init_ring_buffer(dev, ring); |
2672 | } | 2710 | } |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index fe426cff598b..6dbb6f462007 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -142,11 +142,11 @@ struct intel_engine_cs { | |||
142 | 142 | ||
143 | unsigned irq_refcount; /* protected by dev_priv->irq_lock */ | 143 | unsigned irq_refcount; /* protected by dev_priv->irq_lock */ |
144 | u32 irq_enable_mask; /* bitmask to enable ring interrupt */ | 144 | u32 irq_enable_mask; /* bitmask to enable ring interrupt */ |
145 | u32 trace_irq_seqno; | 145 | struct drm_i915_gem_request *trace_irq_req; |
146 | bool __must_check (*irq_get)(struct intel_engine_cs *ring); | 146 | bool __must_check (*irq_get)(struct intel_engine_cs *ring); |
147 | void (*irq_put)(struct intel_engine_cs *ring); | 147 | void (*irq_put)(struct intel_engine_cs *ring); |
148 | 148 | ||
149 | int (*init)(struct intel_engine_cs *ring); | 149 | int (*init_hw)(struct intel_engine_cs *ring); |
150 | 150 | ||
151 | int (*init_context)(struct intel_engine_cs *ring, | 151 | int (*init_context)(struct intel_engine_cs *ring, |
152 | struct intel_context *ctx); | 152 | struct intel_context *ctx); |
@@ -251,7 +251,7 @@ struct intel_engine_cs { | |||
251 | * ringbuffer. | 251 | * ringbuffer. |
252 | * | 252 | * |
253 | * Includes buffers having the contents of their GPU caches | 253 | * Includes buffers having the contents of their GPU caches |
254 | * flushed, not necessarily primitives. last_rendering_seqno | 254 | * flushed, not necessarily primitives. last_read_req |
255 | * represents when the rendering involved will be completed. | 255 | * represents when the rendering involved will be completed. |
256 | * | 256 | * |
257 | * A reference is held on the buffer while on this list. | 257 | * A reference is held on the buffer while on this list. |
@@ -267,8 +267,7 @@ struct intel_engine_cs { | |||
267 | /** | 267 | /** |
268 | * Do we have some not yet emitted requests outstanding? | 268 | * Do we have some not yet emitted requests outstanding? |
269 | */ | 269 | */ |
270 | struct drm_i915_gem_request *preallocated_lazy_request; | 270 | struct drm_i915_gem_request *outstanding_lazy_request; |
271 | u32 outstanding_lazy_seqno; | ||
272 | bool gpu_caches_dirty; | 271 | bool gpu_caches_dirty; |
273 | bool fbc_dirty; | 272 | bool fbc_dirty; |
274 | 273 | ||
@@ -408,6 +407,7 @@ static inline void intel_ring_advance(struct intel_engine_cs *ring) | |||
408 | ringbuf->tail &= ringbuf->size - 1; | 407 | ringbuf->tail &= ringbuf->size - 1; |
409 | } | 408 | } |
410 | int __intel_ring_space(int head, int tail, int size); | 409 | int __intel_ring_space(int head, int tail, int size); |
410 | void intel_ring_update_space(struct intel_ringbuffer *ringbuf); | ||
411 | int intel_ring_space(struct intel_ringbuffer *ringbuf); | 411 | int intel_ring_space(struct intel_ringbuffer *ringbuf); |
412 | bool intel_ring_stopped(struct intel_engine_cs *ring); | 412 | bool intel_ring_stopped(struct intel_engine_cs *ring); |
413 | void __intel_ring_advance(struct intel_engine_cs *ring); | 413 | void __intel_ring_advance(struct intel_engine_cs *ring); |
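[Note: the header also gains intel_ring_update_space() next to __intel_ring_space(). Its body is not part of this diff; presumably it just refreshes the cached ringbuf->space from the current head/tail, along the lines below. The reserve constant and the exact fields are assumptions based on the existing __intel_ring_space() signature:

#define RING_FREE_SPACE 64  /* assumed reserve so head never catches tail */

struct ringbuf { int head, tail, size, space; };

/* Free bytes between tail and head in a circular buffer, minus a
 * reserve (the shape of __intel_ring_space()). */
static int ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space - RING_FREE_SPACE;
}

/* Likely job of intel_ring_update_space(): recompute the cached field
 * (sketch, not the driver's code). */
static void ring_update_space(struct ringbuf *rb)
{
	rb->space = ring_space(rb->head, rb->tail, rb->size);
}
]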
@@ -436,16 +436,11 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf) | |||
436 | return ringbuf->tail; | 436 | return ringbuf->tail; |
437 | } | 437 | } |
438 | 438 | ||
439 | static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring) | 439 | static inline struct drm_i915_gem_request * |
440 | intel_ring_get_request(struct intel_engine_cs *ring) | ||
440 | { | 441 | { |
441 | BUG_ON(ring->outstanding_lazy_seqno == 0); | 442 | BUG_ON(ring->outstanding_lazy_request == NULL); |
442 | return ring->outstanding_lazy_seqno; | 443 | return ring->outstanding_lazy_request; |
443 | } | ||
444 | |||
445 | static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno) | ||
446 | { | ||
447 | if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) | ||
448 | ring->trace_irq_seqno = seqno; | ||
449 | } | 444 | } |
450 | 445 | ||
451 | #endif /* _INTEL_RINGBUFFER_H_ */ | 446 | #endif /* _INTEL_RINGBUFFER_H_ */ |
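[Note: taken together, these header changes replace raw u32 seqnos with request objects: trace_irq_seqno becomes trace_irq_req, outstanding_lazy_seqno folds into outstanding_lazy_request, and intel_ring_get_seqno() becomes intel_ring_get_request(), with the BUG_ON moving from "seqno == 0" to "request == NULL". A standalone sketch of the new accessor shape; the stand-in struct request models struct drm_i915_gem_request, which in the driver is refcounted and carries much more state:

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

struct request { uint32_t seqno; };

struct engine { struct request *outstanding_lazy_request; };

/* Mirrors intel_ring_get_request(): hand back the request object, not a
 * bare seqno, so lifetime and any reference the caller takes (e.g. the
 * tracepoint code via trace_irq_req) stay explicit. */
static struct request *engine_get_request(struct engine *e)
{
	assert(e->outstanding_lazy_request != NULL); /* BUG_ON in the driver */
	return e->outstanding_lazy_request;
}

int main(void)
{
	struct request rq = { .seqno = 42 };
	struct engine e = { .outstanding_lazy_request = &rq };

	/* Callers only ask the request for its seqno at the point of use. */
	return engine_get_request(&e)->seqno == 42 ? 0 : 1;
}
]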
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index f5a78d53e297..6aa3a81df485 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
@@ -118,7 +118,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, | |||
118 | } | 118 | } |
119 | 119 | ||
120 | /** | 120 | /** |
121 | * intel_display_power_is_enabled - unlocked check for a power domain | 121 | * intel_display_power_is_enabled - check for a power domain |
122 | * @dev_priv: i915 device instance | 122 | * @dev_priv: i915 device instance |
123 | * @domain: power domain to check | 123 | * @domain: power domain to check |
124 | * | 124 | * |
@@ -633,7 +633,7 @@ static void check_power_well_state(struct drm_i915_private *dev_priv, | |||
633 | return; | 633 | return; |
634 | 634 | ||
635 | mismatch: | 635 | mismatch: |
636 | WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n", | 636 | I915_STATE_WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n", |
637 | power_well->name, power_well->always_on, enabled, | 637 | power_well->name, power_well->always_on, enabled, |
638 | power_well->count, i915.disable_power_well); | 638 | power_well->count, i915.disable_power_well); |
639 | } | 639 | } |
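[Note: the power-well mismatch check switches from a bare WARN(1, ...) to I915_STATE_WARN(1, ...). The macro is defined elsewhere in this series; the usual motivation is to report state-checker mismatches as plain errors instead of full backtraces unless verbose checking is enabled. A standalone sketch of that idea; the parameter name and exact behaviour are assumptions:

#include <stdio.h>

static int verbose_state_checks;  /* stand-in for a module parameter */

/* Sketch of an I915_STATE_WARN-style macro: a loud WARN (backtrace in
 * the kernel) only when verbose checking is on, a one-line error
 * otherwise. */
#define STATE_WARN(cond, fmt, ...) do { \
	if (cond) { \
		if (verbose_state_checks) \
			fprintf(stderr, "WARN: " fmt "\n", ##__VA_ARGS__); \
		else \
			fprintf(stderr, "error: " fmt "\n", ##__VA_ARGS__); \
	} \
} while (0)

int main(void)
{
	STATE_WARN(1, "state mismatch for '%s' (use-count %d)", "PW1", 2);
	return 0;
}
]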
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 7d9c340f7693..c18e57d36c2c 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -1004,7 +1004,7 @@ intel_post_enable_primary(struct drm_crtc *crtc) | |||
1004 | hsw_enable_ips(intel_crtc); | 1004 | hsw_enable_ips(intel_crtc); |
1005 | 1005 | ||
1006 | mutex_lock(&dev->struct_mutex); | 1006 | mutex_lock(&dev->struct_mutex); |
1007 | intel_update_fbc(dev); | 1007 | intel_fbc_update(dev); |
1008 | mutex_unlock(&dev->struct_mutex); | 1008 | mutex_unlock(&dev->struct_mutex); |
1009 | } | 1009 | } |
1010 | 1010 | ||
@@ -1017,7 +1017,7 @@ intel_pre_disable_primary(struct drm_crtc *crtc) | |||
1017 | 1017 | ||
1018 | mutex_lock(&dev->struct_mutex); | 1018 | mutex_lock(&dev->struct_mutex); |
1019 | if (dev_priv->fbc.plane == intel_crtc->plane) | 1019 | if (dev_priv->fbc.plane == intel_crtc->plane) |
1020 | intel_disable_fbc(dev); | 1020 | intel_fbc_disable(dev); |
1021 | mutex_unlock(&dev->struct_mutex); | 1021 | mutex_unlock(&dev->struct_mutex); |
1022 | 1022 | ||
1023 | /* | 1023 | /* |
@@ -1096,9 +1096,9 @@ static int | |||
1096 | intel_check_sprite_plane(struct drm_plane *plane, | 1096 | intel_check_sprite_plane(struct drm_plane *plane, |
1097 | struct intel_plane_state *state) | 1097 | struct intel_plane_state *state) |
1098 | { | 1098 | { |
1099 | struct intel_crtc *intel_crtc = to_intel_crtc(state->crtc); | 1099 | struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc); |
1100 | struct intel_plane *intel_plane = to_intel_plane(plane); | 1100 | struct intel_plane *intel_plane = to_intel_plane(plane); |
1101 | struct drm_framebuffer *fb = state->fb; | 1101 | struct drm_framebuffer *fb = state->base.fb; |
1102 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | 1102 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); |
1103 | int crtc_x, crtc_y; | 1103 | int crtc_x, crtc_y; |
1104 | unsigned int crtc_w, crtc_h; | 1104 | unsigned int crtc_w, crtc_h; |
@@ -1109,7 +1109,12 @@ intel_check_sprite_plane(struct drm_plane *plane, | |||
1109 | const struct drm_rect *clip = &state->clip; | 1109 | const struct drm_rect *clip = &state->clip; |
1110 | int hscale, vscale; | 1110 | int hscale, vscale; |
1111 | int max_scale, min_scale; | 1111 | int max_scale, min_scale; |
1112 | int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); | 1112 | int pixel_size; |
1113 | |||
1114 | if (!fb) { | ||
1115 | state->visible = false; | ||
1116 | return 0; | ||
1117 | } | ||
1113 | 1118 | ||
1114 | /* Don't modify another pipe's plane */ | 1119 | /* Don't modify another pipe's plane */ |
1115 | if (intel_plane->pipe != intel_crtc->pipe) { | 1120 | if (intel_plane->pipe != intel_crtc->pipe) { |
@@ -1232,6 +1237,7 @@ intel_check_sprite_plane(struct drm_plane *plane, | |||
1232 | if (src_w < 3 || src_h < 3) | 1237 | if (src_w < 3 || src_h < 3) |
1233 | state->visible = false; | 1238 | state->visible = false; |
1234 | 1239 | ||
1240 | pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); | ||
1235 | width_bytes = ((src_x * pixel_size) & 63) + | 1241 | width_bytes = ((src_x * pixel_size) & 63) + |
1236 | src_w * pixel_size; | 1242 | src_w * pixel_size; |
1237 | 1243 | ||
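[Note: there is an ordering fix folded into these two hunks: pixel_size used to be initialised in the declarations via drm_format_plane_cpp(fb->pixel_format, 0), i.e. before the new "!fb" early return could run, so disabling the plane would dereference a NULL fb. Deferring the computation until after the check avoids that. The hazard in miniature; stand-in types, with cpp_for_format() modelling drm_format_plane_cpp():

#include <stddef.h>

struct fb { unsigned int pixel_format; };

static int cpp_for_format(unsigned int format) { return (int)(format & 0xf); }

static int check_plane(const struct fb *fb)
{
	int pixel_size;

	if (!fb)
		return 0;	/* plane being disabled: nothing to validate */

	/* Safe: fb is known non-NULL past the early return.  Hoisting
	 * this into the declaration above would dereference a NULL fb. */
	pixel_size = cpp_for_format(fb->pixel_format);

	return pixel_size > 0 ? 0 : -1;
}
]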
@@ -1257,53 +1263,17 @@ intel_check_sprite_plane(struct drm_plane *plane, | |||
1257 | return 0; | 1263 | return 0; |
1258 | } | 1264 | } |
1259 | 1265 | ||
1260 | static int | ||
1261 | intel_prepare_sprite_plane(struct drm_plane *plane, | ||
1262 | struct intel_plane_state *state) | ||
1263 | { | ||
1264 | struct drm_device *dev = plane->dev; | ||
1265 | struct drm_crtc *crtc = state->crtc; | ||
1266 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1267 | struct intel_plane *intel_plane = to_intel_plane(plane); | ||
1268 | enum pipe pipe = intel_crtc->pipe; | ||
1269 | struct drm_framebuffer *fb = state->fb; | ||
1270 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | ||
1271 | struct drm_i915_gem_object *old_obj = intel_plane->obj; | ||
1272 | int ret; | ||
1273 | |||
1274 | if (old_obj != obj) { | ||
1275 | mutex_lock(&dev->struct_mutex); | ||
1276 | |||
1277 | /* Note that this will apply the VT-d workaround for scanouts, | ||
1278 | * which is more restrictive than required for sprites. (The | ||
1279 | * primary plane requires 256KiB alignment with 64 PTE padding, | ||
1280 | * the sprite planes only require 128KiB alignment and 32 PTE | ||
1281 | * padding. | ||
1282 | */ | ||
1283 | ret = intel_pin_and_fence_fb_obj(plane, fb, NULL); | ||
1284 | if (ret == 0) | ||
1285 | i915_gem_track_fb(old_obj, obj, | ||
1286 | INTEL_FRONTBUFFER_SPRITE(pipe)); | ||
1287 | mutex_unlock(&dev->struct_mutex); | ||
1288 | if (ret) | ||
1289 | return ret; | ||
1290 | } | ||
1291 | |||
1292 | return 0; | ||
1293 | } | ||
1294 | |||
1295 | static void | 1266 | static void |
1296 | intel_commit_sprite_plane(struct drm_plane *plane, | 1267 | intel_commit_sprite_plane(struct drm_plane *plane, |
1297 | struct intel_plane_state *state) | 1268 | struct intel_plane_state *state) |
1298 | { | 1269 | { |
1299 | struct drm_device *dev = plane->dev; | 1270 | struct drm_device *dev = plane->dev; |
1300 | struct drm_crtc *crtc = state->crtc; | 1271 | struct drm_crtc *crtc = state->base.crtc; |
1301 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 1272 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
1302 | struct intel_plane *intel_plane = to_intel_plane(plane); | 1273 | struct intel_plane *intel_plane = to_intel_plane(plane); |
1303 | enum pipe pipe = intel_crtc->pipe; | 1274 | enum pipe pipe = intel_crtc->pipe; |
1304 | struct drm_framebuffer *fb = state->fb; | 1275 | struct drm_framebuffer *fb = state->base.fb; |
1305 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | 1276 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); |
1306 | struct drm_i915_gem_object *old_obj = intel_plane->obj; | ||
1307 | int crtc_x, crtc_y; | 1277 | int crtc_x, crtc_y; |
1308 | unsigned int crtc_w, crtc_h; | 1278 | unsigned int crtc_w, crtc_h; |
1309 | uint32_t src_x, src_y, src_w, src_h; | 1279 | uint32_t src_x, src_y, src_w, src_h; |
@@ -1312,6 +1282,17 @@ intel_commit_sprite_plane(struct drm_plane *plane, | |||
1312 | bool primary_enabled; | 1282 | bool primary_enabled; |
1313 | 1283 | ||
1314 | /* | 1284 | /* |
1285 | * 'prepare' is never called when plane is being disabled, so we need | ||
1286 | * to handle frontbuffer tracking here | ||
1287 | */ | ||
1288 | if (!fb) { | ||
1289 | mutex_lock(&dev->struct_mutex); | ||
1290 | i915_gem_track_fb(intel_fb_obj(plane->fb), NULL, | ||
1291 | INTEL_FRONTBUFFER_SPRITE(pipe)); | ||
1292 | mutex_unlock(&dev->struct_mutex); | ||
1293 | } | ||
1294 | |||
1295 | /* | ||
1315 | * If the sprite is completely covering the primary plane, | 1296 | * If the sprite is completely covering the primary plane, |
1316 | * we can disable the primary and save power. | 1297 | * we can disable the primary and save power. |
1317 | */ | 1298 | */ |
@@ -1361,112 +1342,6 @@ intel_commit_sprite_plane(struct drm_plane *plane, | |||
1361 | if (!primary_was_enabled && primary_enabled) | 1342 | if (!primary_was_enabled && primary_enabled) |
1362 | intel_post_enable_primary(crtc); | 1343 | intel_post_enable_primary(crtc); |
1363 | } | 1344 | } |
1364 | |||
1365 | /* Unpin old obj after new one is active to avoid ugliness */ | ||
1366 | if (old_obj && old_obj != obj) { | ||
1367 | |||
1368 | /* | ||
1369 | * It's fairly common to simply update the position of | ||
1370 | * an existing object. In that case, we don't need to | ||
1371 | * wait for vblank to avoid ugliness, we only need to | ||
1372 | * do the pin & ref bookkeeping. | ||
1373 | */ | ||
1374 | if (intel_crtc->active) | ||
1375 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
1376 | |||
1377 | mutex_lock(&dev->struct_mutex); | ||
1378 | intel_unpin_fb_obj(old_obj); | ||
1379 | mutex_unlock(&dev->struct_mutex); | ||
1380 | } | ||
1381 | } | ||
1382 | |||
1383 | static int | ||
1384 | intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | ||
1385 | struct drm_framebuffer *fb, int crtc_x, int crtc_y, | ||
1386 | unsigned int crtc_w, unsigned int crtc_h, | ||
1387 | uint32_t src_x, uint32_t src_y, | ||
1388 | uint32_t src_w, uint32_t src_h) | ||
1389 | { | ||
1390 | struct intel_plane_state state; | ||
1391 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
1392 | int ret; | ||
1393 | |||
1394 | state.crtc = crtc; | ||
1395 | state.fb = fb; | ||
1396 | |||
1397 | /* sample coordinates in 16.16 fixed point */ | ||
1398 | state.src.x1 = src_x; | ||
1399 | state.src.x2 = src_x + src_w; | ||
1400 | state.src.y1 = src_y; | ||
1401 | state.src.y2 = src_y + src_h; | ||
1402 | |||
1403 | /* integer pixels */ | ||
1404 | state.dst.x1 = crtc_x; | ||
1405 | state.dst.x2 = crtc_x + crtc_w; | ||
1406 | state.dst.y1 = crtc_y; | ||
1407 | state.dst.y2 = crtc_y + crtc_h; | ||
1408 | |||
1409 | state.clip.x1 = 0; | ||
1410 | state.clip.y1 = 0; | ||
1411 | state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0; | ||
1412 | state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0; | ||
1413 | state.orig_src = state.src; | ||
1414 | state.orig_dst = state.dst; | ||
1415 | |||
1416 | ret = intel_check_sprite_plane(plane, &state); | ||
1417 | if (ret) | ||
1418 | return ret; | ||
1419 | |||
1420 | ret = intel_prepare_sprite_plane(plane, &state); | ||
1421 | if (ret) | ||
1422 | return ret; | ||
1423 | |||
1424 | intel_commit_sprite_plane(plane, &state); | ||
1425 | return 0; | ||
1426 | } | ||
1427 | |||
1428 | static int | ||
1429 | intel_disable_plane(struct drm_plane *plane) | ||
1430 | { | ||
1431 | struct drm_device *dev = plane->dev; | ||
1432 | struct intel_plane *intel_plane = to_intel_plane(plane); | ||
1433 | struct intel_crtc *intel_crtc; | ||
1434 | enum pipe pipe; | ||
1435 | |||
1436 | if (!plane->fb) | ||
1437 | return 0; | ||
1438 | |||
1439 | if (WARN_ON(!plane->crtc)) | ||
1440 | return -EINVAL; | ||
1441 | |||
1442 | intel_crtc = to_intel_crtc(plane->crtc); | ||
1443 | pipe = intel_crtc->pipe; | ||
1444 | |||
1445 | if (intel_crtc->active) { | ||
1446 | bool primary_was_enabled = intel_crtc->primary_enabled; | ||
1447 | |||
1448 | intel_crtc->primary_enabled = true; | ||
1449 | |||
1450 | intel_plane->disable_plane(plane, plane->crtc); | ||
1451 | |||
1452 | if (!primary_was_enabled && intel_crtc->primary_enabled) | ||
1453 | intel_post_enable_primary(plane->crtc); | ||
1454 | } | ||
1455 | |||
1456 | if (intel_plane->obj) { | ||
1457 | if (intel_crtc->active) | ||
1458 | intel_wait_for_vblank(dev, intel_plane->pipe); | ||
1459 | |||
1460 | mutex_lock(&dev->struct_mutex); | ||
1461 | intel_unpin_fb_obj(intel_plane->obj); | ||
1462 | i915_gem_track_fb(intel_plane->obj, NULL, | ||
1463 | INTEL_FRONTBUFFER_SPRITE(pipe)); | ||
1464 | mutex_unlock(&dev->struct_mutex); | ||
1465 | |||
1466 | intel_plane->obj = NULL; | ||
1467 | } | ||
1468 | |||
1469 | return 0; | ||
1470 | } | 1345 | } |
1471 | 1346 | ||
1472 | static void intel_destroy_plane(struct drm_plane *plane) | 1347 | static void intel_destroy_plane(struct drm_plane *plane) |
@@ -1576,14 +1451,6 @@ int intel_plane_restore(struct drm_plane *plane) | |||
1576 | intel_plane->src_w, intel_plane->src_h); | 1451 | intel_plane->src_w, intel_plane->src_h); |
1577 | } | 1452 | } |
1578 | 1453 | ||
1579 | void intel_plane_disable(struct drm_plane *plane) | ||
1580 | { | ||
1581 | if (!plane->crtc || !plane->fb) | ||
1582 | return; | ||
1583 | |||
1584 | intel_disable_plane(plane); | ||
1585 | } | ||
1586 | |||
1587 | static const struct drm_plane_funcs intel_plane_funcs = { | 1454 | static const struct drm_plane_funcs intel_plane_funcs = { |
1588 | .update_plane = intel_update_plane, | 1455 | .update_plane = intel_update_plane, |
1589 | .disable_plane = intel_disable_plane, | 1456 | .disable_plane = intel_disable_plane, |
@@ -1720,6 +1587,8 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) | |||
1720 | intel_plane->pipe = pipe; | 1587 | intel_plane->pipe = pipe; |
1721 | intel_plane->plane = plane; | 1588 | intel_plane->plane = plane; |
1722 | intel_plane->rotation = BIT(DRM_ROTATE_0); | 1589 | intel_plane->rotation = BIT(DRM_ROTATE_0); |
1590 | intel_plane->check_plane = intel_check_sprite_plane; | ||
1591 | intel_plane->commit_plane = intel_commit_sprite_plane; | ||
1723 | possible_crtcs = (1 << pipe); | 1592 | possible_crtcs = (1 << pipe); |
1724 | ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, | 1593 | ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, |
1725 | &intel_plane_funcs, | 1594 | &intel_plane_funcs, |
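[Note: with intel_prepare_sprite_plane(), the legacy intel_update_plane()/intel_disable_plane() bodies and intel_plane_disable() removed, the sprite code keeps only its check and commit stages and exports them through the new intel_plane->check_plane/commit_plane hooks set in intel_plane_init(). A shared update path can then drive any plane type through the same two-phase shape; the dispatcher below is illustrative, not the driver's actual common code:

struct plane_state { int visible; /* ... */ };

struct plane_hooks {
	int  (*check_plane)(struct plane_state *state);  /* validate only  */
	void (*commit_plane)(struct plane_state *state); /* touch hardware */
};

/* Two-phase update: nothing is committed unless the check passed, so a
 * failed update leaves the hardware untouched. */
static int plane_update(const struct plane_hooks *hooks,
			struct plane_state *state)
{
	int ret = hooks->check_plane(state);
	if (ret)
		return ret;

	hooks->commit_plane(state);
	return 0;
}
]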
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 46de8d75b4bf..e9561de382aa 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
@@ -647,9 +647,9 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv) | |||
647 | 647 | ||
648 | #define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \ | 648 | #define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \ |
649 | (REG_RANGE((reg), 0x2000, 0x4000) || \ | 649 | (REG_RANGE((reg), 0x2000, 0x4000) || \ |
650 | REG_RANGE((reg), 0x5000, 0x8000) || \ | 650 | REG_RANGE((reg), 0x5200, 0x8000) || \ |
651 | REG_RANGE((reg), 0x8300, 0x8500) || \ | 651 | REG_RANGE((reg), 0x8300, 0x8500) || \ |
652 | REG_RANGE((reg), 0xB000, 0xC000) || \ | 652 | REG_RANGE((reg), 0xB000, 0xB480) || \ |
653 | REG_RANGE((reg), 0xE000, 0xE800)) | 653 | REG_RANGE((reg), 0xE000, 0xE800)) |
654 | 654 | ||
655 | #define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \ | 655 | #define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \ |
@@ -658,17 +658,14 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv) | |||
658 | REG_RANGE((reg), 0x12000, 0x14000) || \ | 658 | REG_RANGE((reg), 0x12000, 0x14000) || \ |
659 | REG_RANGE((reg), 0x1A000, 0x1C000) || \ | 659 | REG_RANGE((reg), 0x1A000, 0x1C000) || \ |
660 | REG_RANGE((reg), 0x1E800, 0x1EA00) || \ | 660 | REG_RANGE((reg), 0x1E800, 0x1EA00) || \ |
661 | REG_RANGE((reg), 0x30000, 0x40000)) | 661 | REG_RANGE((reg), 0x30000, 0x38000)) |
662 | 662 | ||
663 | #define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \ | 663 | #define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \ |
664 | (REG_RANGE((reg), 0x4000, 0x5000) || \ | 664 | (REG_RANGE((reg), 0x4000, 0x5000) || \ |
665 | REG_RANGE((reg), 0x8000, 0x8300) || \ | 665 | REG_RANGE((reg), 0x8000, 0x8300) || \ |
666 | REG_RANGE((reg), 0x8500, 0x8600) || \ | 666 | REG_RANGE((reg), 0x8500, 0x8600) || \ |
667 | REG_RANGE((reg), 0x9000, 0xB000) || \ | 667 | REG_RANGE((reg), 0x9000, 0xB000) || \ |
668 | REG_RANGE((reg), 0xC000, 0xC800) || \ | 668 | REG_RANGE((reg), 0xF000, 0x10000)) |
669 | REG_RANGE((reg), 0xF000, 0x10000) || \ | ||
670 | REG_RANGE((reg), 0x14000, 0x14400) || \ | ||
671 | REG_RANGE((reg), 0x22000, 0x24000)) | ||
672 | 669 | ||
673 | #define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \ | 670 | #define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \ |
674 | REG_RANGE((reg), 0xB00, 0x2000) | 671 | REG_RANGE((reg), 0xB00, 0x2000) |
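[Note: the CHV edits above trim the forcewake windows (render 0x5000 to 0x5200, 0xB000-0xC000 to 0xB000-0xB480, media 0x30000-0x40000 to 0x30000-0x38000, and three common ranges dropped). REG_RANGE is defined earlier in the file; it is presumably a half-open interval test used to pick a forcewake domain per register offset, as sketched below with a reduced subset of the ranges visible in this diff; the domain enum is illustrative:

#include <stdint.h>

/* Half-open interval test -- the likely shape of intel_uncore.c's
 * REG_RANGE() macro. */
#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

enum fw_domain { FW_NONE, FW_RENDER, FW_MEDIA, FW_COMMON };

/* Classify a register offset into the forcewake domain that must be
 * awake before touching it (subset of the CHV ranges above). */
static enum fw_domain chv_fw_domain(uint32_t reg)
{
	if (REG_RANGE(reg, 0x2000, 0x4000) || REG_RANGE(reg, 0x5200, 0x8000))
		return FW_RENDER;
	if (REG_RANGE(reg, 0x12000, 0x14000) || REG_RANGE(reg, 0x30000, 0x38000))
		return FW_MEDIA;
	if (REG_RANGE(reg, 0x4000, 0x5000) || REG_RANGE(reg, 0x8000, 0x8300))
		return FW_COMMON;
	return FW_NONE;
}
]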
@@ -1202,7 +1199,7 @@ void intel_uncore_init(struct drm_device *dev) | |||
1202 | 1199 | ||
1203 | switch (INTEL_INFO(dev)->gen) { | 1200 | switch (INTEL_INFO(dev)->gen) { |
1204 | default: | 1201 | default: |
1205 | WARN_ON(1); | 1202 | MISSING_CASE(INTEL_INFO(dev)->gen); |
1206 | return; | 1203 | return; |
1207 | case 9: | 1204 | case 9: |
1208 | ASSIGN_WRITE_MMIO_VFUNCS(gen9); | 1205 | ASSIGN_WRITE_MMIO_VFUNCS(gen9); |
@@ -1300,7 +1297,7 @@ int i915_reg_read_ioctl(struct drm_device *dev, | |||
1300 | reg->val = I915_READ8(reg->offset); | 1297 | reg->val = I915_READ8(reg->offset); |
1301 | break; | 1298 | break; |
1302 | default: | 1299 | default: |
1303 | WARN_ON(1); | 1300 | MISSING_CASE(entry->size); |
1304 | ret = -EINVAL; | 1301 | ret = -EINVAL; |
1305 | goto out; | 1302 | goto out; |
1306 | } | 1303 | } |
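[Note: both WARN_ON(1) defaults become MISSING_CASE(...), which, presumably (its definition is not part of this diff), folds the unhandled value into the warning text so the report is self-describing rather than a bare backtrace. A standalone sketch of such a macro:

#include <stdio.h>

/* Sketch of a MISSING_CASE-style helper: unlike a bare WARN_ON(1), the
 * offending value and the function name land in the message itself. */
#define MISSING_CASE(x) \
	fprintf(stderr, "Missing switch case (%ld) in %s\n", (long)(x), __func__)

int main(void)
{
	int gen = 42;

	switch (gen) {
	case 9:
		break;
	default:
		MISSING_CASE(gen);
		return 1;
	}
	return 0;
}
]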