Diffstat (limited to 'drivers/gpu')
49 files changed, 2366 insertions, 983 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 3b5176dd1c86..e41a2d0311f8 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -29,11 +29,17 @@ config DRM_USB
 config DRM_KMS_HELPER
 	tristate
 	depends on DRM
+	help
+	  CRTC helpers for KMS drivers.
+
+config DRM_KMS_FB_HELPER
+	bool
+	depends on DRM_KMS_HELPER
 	select FB
 	select FRAMEBUFFER_CONSOLE if !EXPERT
 	select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
 	help
-	  FB and CRTC helpers for KMS drivers.
+	  FBDEV helpers for KMS drivers.
 
 config DRM_LOAD_EDID_FIRMWARE
 	bool "Allow to specify an EDID data set instead of probing for it"
@@ -64,6 +70,7 @@ config DRM_GEM_CMA_HELPER
 config DRM_KMS_CMA_HELPER
 	bool
 	select DRM_GEM_CMA_HELPER
+	select DRM_KMS_FB_HELPER
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
@@ -96,6 +103,7 @@ config DRM_RADEON
 	select FB_CFB_IMAGEBLIT
 	select FW_LOADER
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	select POWER_SUPPLY
 	select HWMON
@@ -120,64 +128,7 @@ config DRM_I810
 	  selected, the module will be called i810. AGP support is required
 	  for this driver to work.
 
-config DRM_I915
-	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
-	depends on DRM
-	depends on AGP
-	depends on AGP_INTEL
-	# we need shmfs for the swappable backing store, and in particular
-	# the shmem_readpage() which depends upon tmpfs
-	select SHMEM
-	select TMPFS
-	select DRM_KMS_HELPER
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
-	# i915 depends on ACPI_VIDEO when ACPI is enabled
-	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
-	select BACKLIGHT_LCD_SUPPORT if ACPI
-	select BACKLIGHT_CLASS_DEVICE if ACPI
-	select VIDEO_OUTPUT_CONTROL if ACPI
-	select INPUT if ACPI
-	select THERMAL if ACPI
-	select ACPI_VIDEO if ACPI
-	select ACPI_BUTTON if ACPI
-	help
-	  Choose this option if you have a system that has "Intel Graphics
-	  Media Accelerator" or "HD Graphics" integrated graphics,
-	  including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
-	  G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
-	  Core i5, Core i7 as well as Atom CPUs with integrated graphics.
-	  If M is selected, the module will be called i915. AGP support
-	  is required for this driver to work. This driver is used by
-	  the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
-	  replaces the older i830 module that supported a subset of the
-	  hardware in older X.org releases.
-
-	  Note that the older i810/i815 chipsets require the use of the
-	  i810 driver instead, and the Atom z5xx series has an entirely
-	  different implementation.
-
-config DRM_I915_KMS
-	bool "Enable modesetting on intel by default"
-	depends on DRM_I915
-	help
-	  Choose this option if you want kernel modesetting enabled by default,
-	  and you have a new enough userspace to support this. Running old
-	  userspaces with this enabled will cause pain. Note that this causes
-	  the driver to bind to PCI devices, which precludes loading things
-	  like intelfb.
-
-config DRM_I915_PRELIMINARY_HW_SUPPORT
-	bool "Enable preliminary support for prerelease Intel hardware by default"
-	depends on DRM_I915
-	help
-	  Choose this option if you have prerelease Intel hardware and want the
-	  i915 driver to support it by default. You can enable such support at
-	  runtime with the module option i915.preliminary_hw_support=1; this
-	  option changes the default for that module option.
-
-	  If in doubt, say "N".
+source "drivers/gpu/drm/i915/Kconfig"
 
 config DRM_MGA
 	tristate "Matrox g200/g400"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 385f460459d4..5266e9fae216 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -21,8 +21,9 @@ drm-$(CONFIG_PCI) += ati_pcigart.o
 
 drm-usb-y := drm_usb.o
 
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index da4a51eae824..8a784c460c89 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -6,6 +6,7 @@ config DRM_AST
 	select FB_SYS_FILLRECT
 	select FB_SYS_IMAGEBLIT
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	help
 	  Say yes for experimental AST GPU driver. Do not enable
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index bf67b22723f9..9864559e5fb9 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -5,6 +5,7 @@ config DRM_CIRRUS_QEMU
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	help
 	  This is a KMS driver for emulated cirrus device in qemu.
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 55ea4ac6fd50..dbcd68709ab7 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -39,6 +39,10 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_edid.h>
 
+MODULE_AUTHOR("David Airlie, Jesse Barnes");
+MODULE_DESCRIPTION("DRM KMS helper");
+MODULE_LICENSE("GPL and additional rights");
+
 /**
  * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
  *						connector list
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 9e81609b1e29..f1764ec5818b 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -2652,6 +2652,50 @@ static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
 	return 1;
 }
 
+static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
+			       const u8 *video_db, u8 video_len, u8 video_index)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_display_mode *newmode;
+	int modes = 0;
+	u8 cea_mode;
+
+	if (video_db == NULL || video_index > video_len)
+		return 0;
+
+	/* CEA modes are numbered 1..127 */
+	cea_mode = (video_db[video_index] & 127) - 1;
+	if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
+		return 0;
+
+	if (structure & (1 << 0)) {
+		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		if (newmode) {
+			newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+	if (structure & (1 << 6)) {
+		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		if (newmode) {
+			newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+	if (structure & (1 << 8)) {
+		newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+		if (newmode) {
+			newmode->flags = DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
+			drm_mode_probed_add(connector, newmode);
+			modes++;
+		}
+	}
+
+	return modes;
+}
+
 /*
  * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
  * @connector: connector corresponding to the HDMI sink
@@ -2662,10 +2706,13 @@ static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
  * also adds the stereo 3d modes when applicable.
  */
 static int
-do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
+do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
+		   const u8 *video_db, u8 video_len)
 {
-	int modes = 0, offset = 0, i;
-	u8 vic_len;
+	int modes = 0, offset = 0, i, multi_present = 0;
+	u8 vic_len, hdmi_3d_len = 0;
+	u16 mask;
+	u16 structure_all;
 
 	if (len < 8)
 		goto out;
@@ -2689,11 +2736,16 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
 
 	/* 3D_Present */
 	offset++;
-	if (db[8 + offset] & (1 << 7))
+	if (db[8 + offset] & (1 << 7)) {
 		modes += add_hdmi_mandatory_stereo_modes(connector);
 
+		/* 3D_Multi_present */
+		multi_present = (db[8 + offset] & 0x60) >> 5;
+	}
+
 	offset++;
 	vic_len = db[8 + offset] >> 5;
+	hdmi_3d_len = db[8 + offset] & 0x1f;
 
 	for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
 		u8 vic;
@@ -2701,6 +2753,35 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
 		vic = db[9 + offset + i];
 		modes += add_hdmi_mode(connector, vic);
 	}
+	offset += 1 + vic_len;
+
+	if (!(multi_present == 1 || multi_present == 2))
+		goto out;
+
+	if ((multi_present == 1 && len < (9 + offset)) ||
+	    (multi_present == 2 && len < (11 + offset)))
+		goto out;
+
+	if ((multi_present == 1 && hdmi_3d_len < 2) ||
+	    (multi_present == 2 && hdmi_3d_len < 4))
+		goto out;
+
+	/* 3D_Structure_ALL */
+	structure_all = (db[8 + offset] << 8) | db[9 + offset];
+
+	/* check if 3D_MASK is present */
+	if (multi_present == 2)
+		mask = (db[10 + offset] << 8) | db[11 + offset];
+	else
+		mask = 0xffff;
+
+	for (i = 0; i < 16; i++) {
+		if (mask & (1 << i))
+			modes += add_3d_struct_modes(connector,
+						     structure_all,
+						     video_db,
+						     video_len, i);
+	}
 
 out:
 	return modes;
@@ -2759,8 +2840,8 @@ static int
 add_cea_modes(struct drm_connector *connector, struct edid *edid)
 {
 	const u8 *cea = drm_find_cea_extension(edid);
-	const u8 *db, *hdmi = NULL;
-	u8 dbl, hdmi_len;
+	const u8 *db, *hdmi = NULL, *video = NULL;
+	u8 dbl, hdmi_len, video_len = 0;
 	int modes = 0;
 
 	if (cea && cea_revision(cea) >= 3) {
@@ -2773,8 +2854,11 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
 			db = &cea[i];
 			dbl = cea_db_payload_len(db);
 
-			if (cea_db_tag(db) == VIDEO_BLOCK)
-				modes += do_cea_modes(connector, db + 1, dbl);
+			if (cea_db_tag(db) == VIDEO_BLOCK) {
+				video = db + 1;
+				video_len = dbl;
+				modes += do_cea_modes(connector, video, dbl);
+			}
 			else if (cea_db_is_hdmi_vsdb(db)) {
 				hdmi = db;
 				hdmi_len = dbl;
@@ -2787,7 +2871,8 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
 	 * be patching their flags when the sink supports stereo 3D.
 	 */
 	if (hdmi)
-		modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len);
+		modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len, video,
+					    video_len);
 
 	return modes;
 }
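Editor's note (illustrative sketch, not part of the patch): the hunks above combine three HDMI VSDB fields — 3D_Multi_present selects whether only 3D_Structure_ALL (value 1) or 3D_Structure_ALL plus 3D_MASK (value 2) follows the VIC list, and each set mask bit applies the advertised 3D structures to the corresponding entry of the CEA video data block. A minimal stand-alone decode of hypothetical sample bytes, using the same bit positions as add_3d_struct_modes():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical sample values mirroring what do_hdmi_vsdb_modes() reads */
	uint8_t flags = 0x80 | (2 << 5);		/* 3D_present set, 3D_Multi_present = 2 */
	uint16_t structure_all = (1 << 0) | (1 << 6);	/* frame packing + top-and-bottom */
	uint16_t mask = 0x0003;				/* 3D_MASK: first two VICs only */
	int multi_present = (flags & 0x60) >> 5;
	int i;

	printf("3D_present=%d multi_present=%d\n", !!(flags & 0x80), multi_present);
	for (i = 0; i < 16; i++) {
		if (!(mask & (1 << i)))
			continue;
		printf("VIC index %2d: frame_packing=%d top_and_bottom=%d sbs_half=%d\n",
		       i,
		       !!(structure_all & (1 << 0)),
		       !!(structure_all & (1 << 6)),
		       !!(structure_all & (1 << 8)));
	}
	return 0;
}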
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 21742a81cb9c..720352345452 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -39,10 +39,6 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 
-MODULE_AUTHOR("David Airlie, Jesse Barnes");
-MODULE_DESCRIPTION("DRM KMS helper");
-MODULE_LICENSE("GPL and additional rights");
-
 static LIST_HEAD(kernel_fb_helper_list);
 
 /**
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 45b6ef595965..f227f544aa36 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -2,6 +2,7 @@ config DRM_EXYNOS
 	tristate "DRM Support for Samsung SoC EXYNOS Series"
 	depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 1f6e2dfaaeae..508cf99a292d 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -5,6 +5,7 @@ config DRM_GMA500
 	select FB_CFB_FILLRECT
 	select FB_CFB_IMAGEBLIT
 	select DRM_KMS_HELPER
+	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
 	select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
new file mode 100644
index 000000000000..6199d0b5b958
--- /dev/null
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -0,0 +1,67 @@
+config DRM_I915
+	tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
+	depends on DRM
+	depends on AGP
+	depends on AGP_INTEL
+	# we need shmfs for the swappable backing store, and in particular
+	# the shmem_readpage() which depends upon tmpfs
+	select SHMEM
+	select TMPFS
+	select DRM_KMS_HELPER
+	# i915 depends on ACPI_VIDEO when ACPI is enabled
+	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
+	select BACKLIGHT_LCD_SUPPORT if ACPI
+	select BACKLIGHT_CLASS_DEVICE if ACPI
+	select VIDEO_OUTPUT_CONTROL if ACPI
+	select INPUT if ACPI
+	select ACPI_VIDEO if ACPI
+	select ACPI_BUTTON if ACPI
+	help
+	  Choose this option if you have a system that has "Intel Graphics
+	  Media Accelerator" or "HD Graphics" integrated graphics,
+	  including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
+	  G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
+	  Core i5, Core i7 as well as Atom CPUs with integrated graphics.
+	  If M is selected, the module will be called i915. AGP support
+	  is required for this driver to work. This driver is used by
+	  the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
+	  replaces the older i830 module that supported a subset of the
+	  hardware in older X.org releases.
+
+	  Note that the older i810/i815 chipsets require the use of the
+	  i810 driver instead, and the Atom z5xx series has an entirely
+	  different implementation.
+
+config DRM_I915_KMS
+	bool "Enable modesetting on intel by default"
+	depends on DRM_I915
+	help
+	  Choose this option if you want kernel modesetting enabled by default,
+	  and you have a new enough userspace to support this. Running old
+	  userspaces with this enabled will cause pain. Note that this causes
+	  the driver to bind to PCI devices, which precludes loading things
+	  like intelfb.
+
+config DRM_I915_FBDEV
+	bool "Enable legacy fbdev support for the modesetting intel driver"
+	depends on DRM_I915
+	select DRM_KMS_FB_HELPER
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	default y
+	help
+	  Choose this option if you have a need for the legacy fbdev
+	  support. Note that this support also provides the linux console
+	  support on top of the intel modesetting driver.
+
+config DRM_I915_PRELIMINARY_HW_SUPPORT
+	bool "Enable preliminary support for prerelease Intel hardware by default"
+	depends on DRM_I915
+	help
+	  Choose this option if you have prerelease Intel hardware and want the
+	  i915 driver to support it by default. You can enable such support at
+	  runtime with the module option i915.preliminary_hw_support=1; this
+	  option changes the default for that module option.
+
+	  If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 65e60d26891b..41838eaa799c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -33,7 +33,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
 	  intel_panel.o \
 	  intel_pm.o \
 	  intel_i2c.o \
-	  intel_fb.o \
 	  intel_tv.o \
 	  intel_dvo.o \
 	  intel_ringbuffer.o \
@@ -54,6 +53,8 @@ i915-$(CONFIG_COMPAT) += i915_ioc32.o
 
 i915-$(CONFIG_ACPI) += intel_acpi.o
 
+i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+
 obj-$(CONFIG_DRM_I915) += i915.o
 
 CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 61fd61969e21..061182a0ce1b 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -27,6 +27,8 @@
  */
 
 #include <linux/seq_file.h>
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
 #include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/export.h>
@@ -38,9 +40,6 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-#define DRM_I915_RING_DEBUG 1
-
-
 #if defined(CONFIG_DEBUG_FS)
 
 enum {
@@ -54,6 +53,32 @@ static const char *yesno(int v)
 	return v ? "yes" : "no";
 }
 
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated we need to hook into the minor for release. */
+static int
+drm_add_fake_info_node(struct drm_minor *minor,
+		       struct dentry *ent,
+		       const void *key)
+{
+	struct drm_info_node *node;
+
+	node = kmalloc(sizeof(*node), GFP_KERNEL);
+	if (node == NULL) {
+		debugfs_remove(ent);
+		return -ENOMEM;
+	}
+
+	node->minor = minor;
+	node->dent = ent;
+	node->info_ent = (void *) key;
+
+	mutex_lock(&minor->debugfs_lock);
+	list_add(&node->list, &minor->debugfs_list);
+	mutex_unlock(&minor->debugfs_lock);
+
+	return 0;
+}
+
 static int i915_capabilities(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -850,6 +875,8 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	if (IS_GEN5(dev)) {
 		u16 rgvswctl = I915_READ16(MEMSWCTL);
 		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
@@ -1328,6 +1355,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 		return 0;
 	}
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
@@ -1402,12 +1431,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct intel_fbdev *ifbdev;
+	struct intel_fbdev *ifbdev = NULL;
 	struct intel_framebuffer *fb;
-	int ret;
 
-	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+#ifdef CONFIG_DRM_I915_FBDEV
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
 	if (ret)
 		return ret;
 
@@ -1423,10 +1452,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 	describe_obj(m, fb->obj);
 	seq_putc(m, '\n');
 	mutex_unlock(&dev->mode_config.mutex);
+#endif
 
 	mutex_lock(&dev->mode_config.fb_lock);
 	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
-		if (&fb->base == ifbdev->helper.fb)
+		if (ifbdev && &fb->base == ifbdev->helper.fb)
 			continue;
 
 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
@@ -1730,6 +1760,467 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
 	return 0;
 }
 
+struct pipe_crc_info {
+	const char *name;
+	struct drm_device *dev;
+	enum pipe pipe;
+};
+
+static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
+{
+	struct pipe_crc_info *info = inode->i_private;
+	struct drm_i915_private *dev_priv = info->dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+	if (!atomic_dec_and_test(&pipe_crc->available)) {
+		atomic_inc(&pipe_crc->available);
+		return -EBUSY; /* already open */
+	}
+
+	filep->private_data = inode->i_private;
+
+	return 0;
+}
+
+static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
+{
+	struct pipe_crc_info *info = inode->i_private;
+	struct drm_i915_private *dev_priv = info->dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+	atomic_inc(&pipe_crc->available); /* release the device */
+
+	return 0;
+}
+
+/* (6 fields, 8 chars each, space separated (5) + '\n') */
+#define PIPE_CRC_LINE_LEN	(6 * 8 + 5 + 1)
+/* account for '\0' */
+#define PIPE_CRC_BUFFER_LEN	(PIPE_CRC_LINE_LEN + 1)
+
+static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
+{
+	int head, tail;
+
+	head = atomic_read(&pipe_crc->head);
+	tail = atomic_read(&pipe_crc->tail);
+
+	return CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR);
+}
+
+static ssize_t
+i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
+		   loff_t *pos)
+{
+	struct pipe_crc_info *info = filep->private_data;
+	struct drm_device *dev = info->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+	char buf[PIPE_CRC_BUFFER_LEN];
+	int head, tail, n_entries, n;
+	ssize_t bytes_read;
+
+	/*
+	 * Don't allow user space to provide buffers not big enough to hold
+	 * a line of data.
+	 */
+	if (count < PIPE_CRC_LINE_LEN)
+		return -EINVAL;
+
+	if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
+		return 0;
+
+	/* nothing to read */
+	while (pipe_crc_data_count(pipe_crc) == 0) {
+		if (filep->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+
+		if (wait_event_interruptible(pipe_crc->wq,
+					     pipe_crc_data_count(pipe_crc)))
+			return -ERESTARTSYS;
+	}
+
+	/* We now have one or more entries to read */
+	head = atomic_read(&pipe_crc->head);
+	tail = atomic_read(&pipe_crc->tail);
+	n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
+			count / PIPE_CRC_LINE_LEN);
+	bytes_read = 0;
+	n = 0;
+	do {
+		struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
+		int ret;
+
+		bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
+				       "%8u %8x %8x %8x %8x %8x\n",
+				       entry->frame, entry->crc[0],
+				       entry->crc[1], entry->crc[2],
+				       entry->crc[3], entry->crc[4]);
+
+		ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
+				   buf, PIPE_CRC_LINE_LEN);
+		if (ret == PIPE_CRC_LINE_LEN)
+			return -EFAULT;
+
+		BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
+		tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+		atomic_set(&pipe_crc->tail, tail);
+		n++;
+	} while (--n_entries);
+
+	return bytes_read;
+}
+
+static const struct file_operations i915_pipe_crc_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_pipe_crc_open,
+	.read = i915_pipe_crc_read,
+	.release = i915_pipe_crc_release,
+};
+
+static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
+	{
+		.name = "i915_pipe_A_crc",
+		.pipe = PIPE_A,
+	},
+	{
+		.name = "i915_pipe_B_crc",
+		.pipe = PIPE_B,
+	},
+	{
+		.name = "i915_pipe_C_crc",
+		.pipe = PIPE_C,
+	},
+};
+
+static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
+				enum pipe pipe)
+{
+	struct drm_device *dev = minor->dev;
+	struct dentry *ent;
+	struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
+
+	info->dev = dev;
+	ent = debugfs_create_file(info->name, S_IRUGO, root, info,
+				  &i915_pipe_crc_fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+
+	return drm_add_fake_info_node(minor, ent, info);
+}
+
+static const char * const pipe_crc_sources[] = {
+	"none",
+	"plane1",
+	"plane2",
+	"pf",
+	"pipe",
+};
+
+static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
+{
+	BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
+	return pipe_crc_sources[source];
+}
+
+static int display_crc_ctl_show(struct seq_file *m, void *data)
+{
+	struct drm_device *dev = m->private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < I915_MAX_PIPES; i++)
+		seq_printf(m, "%c %s\n", pipe_name(i),
+			   pipe_crc_source_name(dev_priv->pipe_crc[i].source));
+
+	return 0;
+}
+
+static int display_crc_ctl_open(struct inode *inode, struct file *file)
+{
+	struct drm_device *dev = inode->i_private;
+
+	return single_open(file, display_crc_ctl_show, dev);
+}
+
+static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source source,
+				uint32_t *val)
+{
+	switch (source) {
+	case INTEL_PIPE_CRC_SOURCE_PLANE1:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PLANE2:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PF:
+		return -EINVAL;
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
+		break;
+	default:
+		*val = 0;
+		break;
+	}
+
+	return 0;
+}
+
+static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source source,
+				uint32_t *val)
+{
+	switch (source) {
+	case INTEL_PIPE_CRC_SOURCE_PLANE1:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PLANE2:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PF:
+		*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
+		break;
+	case INTEL_PIPE_CRC_SOURCE_PIPE:
+		return -EINVAL;
+	default:
+		*val = 0;
+		break;
+	}
+
+	return 0;
+}
+
+static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
+			       enum intel_pipe_crc_source source)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+	u32 val;
+	int ret;
+
+	if (!(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)))
+		return -ENODEV;
+
+	if (pipe_crc->source == source)
+		return 0;
+
+	/* forbid changing the source without going back to 'none' */
+	if (pipe_crc->source && source)
+		return -EINVAL;
+
+	if (IS_GEN5(dev) || IS_GEN6(dev))
+		ret = ilk_pipe_crc_ctl_reg(source, &val);
+	else
+		ret = ivb_pipe_crc_ctl_reg(source, &val);
+
+	if (ret != 0)
+		return ret;
+
+	/* none -> real source transition */
+	if (source) {
+		DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
+				 pipe_name(pipe), pipe_crc_source_name(source));
+
+		pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
+					    INTEL_PIPE_CRC_ENTRIES_NR,
+					    GFP_KERNEL);
+		if (!pipe_crc->entries)
+			return -ENOMEM;
+
+		atomic_set(&pipe_crc->head, 0);
+		atomic_set(&pipe_crc->tail, 0);
+	}
+
+	pipe_crc->source = source;
+
+	I915_WRITE(PIPE_CRC_CTL(pipe), val);
+	POSTING_READ(PIPE_CRC_CTL(pipe));
+
+	/* real source -> none transition */
+	if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
+		DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
+				 pipe_name(pipe));
+
+		intel_wait_for_vblank(dev, pipe);
+
+		kfree(pipe_crc->entries);
+		pipe_crc->entries = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Parse pipe CRC command strings:
+ *   command: wsp* object wsp+ name wsp+ source wsp*
+ *   object: 'pipe'
+ *   name: (A | B | C)
+ *   source: (none | plane1 | plane2 | pf)
+ *   wsp: (#0x20 | #0x9 | #0xA)+
+ *
+ * eg.:
+ *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
+ *  "pipe A none"    ->  Stop CRC
+ */
+static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
+{
+	int n_words = 0;
+
+	while (*buf) {
+		char *end;
+
+		/* skip leading white space */
+		buf = skip_spaces(buf);
+		if (!*buf)
+			break;	/* end of buffer */
+
+		/* find end of word */
+		for (end = buf; *end && !isspace(*end); end++)
+			;
+
+		if (n_words == max_words) {
+			DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
+					 max_words);
+			return -EINVAL;	/* ran out of words[] before bytes */
+		}
+
+		if (*end)
+			*end++ = '\0';
+		words[n_words++] = buf;
+		buf = end;
+	}
+
+	return n_words;
+}
+
+enum intel_pipe_crc_object {
+	PIPE_CRC_OBJECT_PIPE,
+};
+
+static const char * const pipe_crc_objects[] = {
+	"pipe",
+};
+
+static int
+display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
+		if (!strcmp(buf, pipe_crc_objects[i])) {
+			*o = i;
+			return 0;
+		}
+
+	return -EINVAL;
+}
+
+static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
+{
+	const char name = buf[0];
+
+	if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
+		return -EINVAL;
+
+	*pipe = name - 'A';
+
+	return 0;
+}
+
+static int
+display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
+		if (!strcmp(buf, pipe_crc_sources[i])) {
+			*s = i;
+			return 0;
+		}
+
+	return -EINVAL;
+}
+
+static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
+{
+#define N_WORDS 3
+	int n_words;
+	char *words[N_WORDS];
+	enum pipe pipe;
+	enum intel_pipe_crc_object object;
+	enum intel_pipe_crc_source source;
+
+	n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
+	if (n_words != N_WORDS) {
+		DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
+				 N_WORDS);
+		return -EINVAL;
+	}
+
+	if (display_crc_ctl_parse_object(words[0], &object) < 0) {
+		DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
+		return -EINVAL;
+	}
+
+	if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
+		DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
+		return -EINVAL;
+	}
+
+	if (display_crc_ctl_parse_source(words[2], &source) < 0) {
+		DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
+		return -EINVAL;
+	}
+
+	return pipe_crc_set_source(dev, pipe, source);
+}
+
+static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
+				     size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_device *dev = m->private;
+	char *tmpbuf;
+	int ret;
+
+	if (len == 0)
+		return 0;
+
+	if (len > PAGE_SIZE - 1) {
+		DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
+				 PAGE_SIZE);
+		return -E2BIG;
+	}
+
+	tmpbuf = kmalloc(len + 1, GFP_KERNEL);
+	if (!tmpbuf)
+		return -ENOMEM;
+
+	if (copy_from_user(tmpbuf, ubuf, len)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	tmpbuf[len] = '\0';
+
+	ret = display_crc_ctl_parse(dev, tmpbuf, len);
+
+out:
+	kfree(tmpbuf);
+	if (ret < 0)
+		return ret;
+
+	*offp += len;
+	return len;
+}
+
+static const struct file_operations i915_display_crc_ctl_fops = {
+	.owner = THIS_MODULE,
+	.open = display_crc_ctl_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = display_crc_ctl_write
+};
+
 static int
 i915_wedged_get(void *data, u64 *val)
 {
@@ -1943,6 +2434,8 @@ i915_max_freq_get(void *data, u64 *val)
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
@@ -1967,6 +2460,8 @@ i915_max_freq_set(void *data, u64 val)
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
 
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2005,6 +2500,8 @@ i915_min_freq_get(void *data, u64 *val)
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
 	if (ret)
 		return ret;
@@ -2029,6 +2526,8 @@ i915_min_freq_set(void *data, u64 val)
 	if (!(IS_GEN6(dev) || IS_GEN7(dev)))
 		return -ENODEV;
 
+	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
 	DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
 
 	ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2107,32 +2606,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
 			i915_cache_sharing_get, i915_cache_sharing_set,
 			"%llu\n");
 
-/* As the drm_debugfs_init() routines are called before dev->dev_private is
- * allocated we need to hook into the minor for release. */
-static int
-drm_add_fake_info_node(struct drm_minor *minor,
-		       struct dentry *ent,
-		       const void *key)
-{
-	struct drm_info_node *node;
-
-	node = kmalloc(sizeof(*node), GFP_KERNEL);
-	if (node == NULL) {
-		debugfs_remove(ent);
-		return -ENOMEM;
-	}
-
-	node->minor = minor;
-	node->dent = ent;
-	node->info_ent = (void *) key;
-
-	mutex_lock(&minor->debugfs_lock);
-	list_add(&node->list, &minor->debugfs_list);
-	mutex_unlock(&minor->debugfs_lock);
-
-	return 0;
-}
-
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
 	struct drm_device *dev = inode->i_private;
@@ -2254,8 +2727,22 @@ static struct i915_debugfs_files {
 	{"i915_gem_drop_caches", &i915_drop_caches_fops},
 	{"i915_error_state", &i915_error_state_fops},
 	{"i915_next_seqno", &i915_next_seqno_fops},
+	{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
 };
 
+void intel_display_crc_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+		struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i];
+
+		atomic_set(&pipe_crc->available, 1);
+		init_waitqueue_head(&pipe_crc->wq);
+	}
+}
+
 int i915_debugfs_init(struct drm_minor *minor)
 {
 	int ret, i;
@@ -2264,6 +2751,12 @@ int i915_debugfs_init(struct drm_minor *minor)
 	if (ret)
 		return ret;
 
+	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+		ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
+		if (ret)
+			return ret;
+	}
+
 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
 		ret = i915_debugfs_create(minor->debugfs_root, minor,
 					  i915_debugfs_files[i].name,
@@ -2283,8 +2776,17 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
 
 	drm_debugfs_remove_files(i915_debugfs_list,
 				 I915_DEBUGFS_ENTRIES, minor);
+
 	drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
 				 1, minor);
+
+	for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+		struct drm_info_list *info_list =
+			(struct drm_info_list *)&i915_pipe_crc_data[i];
+
+		drm_debugfs_remove_files(info_list, 1, minor);
+	}
+
 	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
 		struct drm_info_list *info_list =
 			(struct drm_info_list *) i915_debugfs_files[i].fops;
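Editor's note (illustrative sketch, not part of the patch): the new debugfs files are driven from user space by writing a command such as "pipe A plane1" to i915_display_crc_ctl and then reading one CRC line per frame from i915_pipe_A_crc, in the "%8u %8x %8x %8x %8x %8x" format produced above. A minimal sketch, assuming debugfs is mounted at /sys/kernel/debug and the device is DRM minor 0 (run as root):

#include <stdio.h>

int main(void)
{
	const char *ctl = "/sys/kernel/debug/dri/0/i915_display_crc_ctl";
	const char *crc = "/sys/kernel/debug/dri/0/i915_pipe_A_crc";
	char line[64];
	FILE *f;

	/* start CRC collection on the primary plane of pipe A */
	f = fopen(ctl, "w");
	if (!f)
		return 1;
	fputs("pipe A plane1", f);
	fclose(f);

	/* each line is "<frame> <crc0> <crc1> <crc2> <crc3> <crc4>" */
	f = fopen(crc, "r");
	if (f) {
		if (fgets(line, sizeof(line), f))
			printf("%s", line);
		fclose(f);
	}

	/* stop collection again */
	f = fopen(ctl, "w");
	if (f) {
		fputs("pipe A none", f);
		fclose(f);
	}
	return 0;
}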
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index b3873c945d1b..437886641d90 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1416,6 +1416,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
 	master->driver_priv = NULL;
 }
 
+#ifdef CONFIG_DRM_I915_FBDEV
 static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
 	struct apertures_struct *ap;
@@ -1436,6 +1437,11 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 
 	kfree(ap);
 }
+#else
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+}
+#endif
 
 static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 {
@@ -1477,8 +1483,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	info = (struct intel_device_info *) flags;
 
 	/* Refuse to load on gen6+ without kms enabled. */
-	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
+	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
+		DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
+		DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
 		return -ENODEV;
+	}
 
 	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
 	if (dev_priv == NULL)
@@ -1505,6 +1514,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
 	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
 
+	intel_display_crc_init(dev);
+
 	i915_dump_device_info(dev_priv);
 
 	/* Not all pre-production machines fall into this category, only the
@@ -1542,15 +1553,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	intel_uncore_early_sanitize(dev);
 
-	if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
-		/* The docs do not explain exactly how the calculation can be
-		 * made. It is somewhat guessable, but for now, it's always
-		 * 128MB.
-		 * NB: We can't write IDICR yet because we do not have gt funcs
-		 * set up */
-		dev_priv->ellc_size = 128;
-		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
-	}
+	/* This must be called before any calls to HAS_PCH_* */
+	intel_detect_pch(dev);
+
+	intel_uncore_init(dev);
 
 	ret = i915_gem_gtt_init(dev);
 	if (ret)
@@ -1609,13 +1615,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 		goto out_mtrrfree;
 	}
 
-	/* This must be called before any calls to HAS_PCH_* */
-	intel_detect_pch(dev);
-
 	intel_irq_init(dev);
 	intel_pm_init(dev);
 	intel_uncore_sanitize(dev);
-	intel_uncore_init(dev);
 
 	/* Try to make sure MCHBAR is enabled before poking at it */
 	intel_setup_mchbar(dev);
@@ -1699,6 +1701,7 @@ out_gtt:
 	drm_mm_takedown(&dev_priv->gtt.base.mm);
 	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
 out_regs:
+	intel_uncore_fini(dev);
 	pci_iounmap(dev->pdev, dev_priv->regs);
 put_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
@@ -1729,15 +1732,9 @@ int i915_driver_unload(struct drm_device *dev)
 	if (dev_priv->mm.inactive_shrinker.scan_objects)
 		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
-	mutex_lock(&dev->struct_mutex);
-	ret = i915_gpu_idle(dev);
+	ret = i915_gem_suspend(dev);
 	if (ret)
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
-	i915_gem_retire_requests(dev);
-	mutex_unlock(&dev->struct_mutex);
-
-	/* Cancel the retire work handler, which should be idle now. */
-	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
 	io_mapping_free(dev_priv->gtt.mappable);
 	arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1852,7 +1849,7 @@ void i915_driver_lastclose(struct drm_device * dev)
 		return;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		intel_fb_restore_mode(dev);
+		intel_fbdev_restore_mode(dev);
 		vga_switcheroo_process_delayed_switch();
 		return;
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 96f230497cbe..1060a96d2184 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -160,49 +160,58 @@ extern int intel_agp_enabled;
 static const struct intel_device_info intel_i830_info = {
 	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_845g_info = {
 	.gen = 2, .num_pipes = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_i85x_info = {
 	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
 	.cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_i865g_info = {
 	.gen = 2, .num_pipes = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_i915g_info = {
 	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 static const struct intel_device_info intel_i915gm_info = {
 	.gen = 3, .is_mobile = 1, .num_pipes = 2,
 	.cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
 	.supports_tv = 1,
+	.ring_mask = RENDER_RING,
 };
 static const struct intel_device_info intel_i945g_info = {
 	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
 	.has_overlay = 1, .overlay_needs_physical = 1,
+	.ring_mask = RENDER_RING,
 };
 static const struct intel_device_info intel_i945gm_info = {
 	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
+	.ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_i965g_info = {
 	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
 	.has_hotplug = 1,
 	.has_overlay = 1,
+	.ring_mask = RENDER_RING,
 };
 
208 | static const struct intel_device_info intel_i965gm_info = { | 217 | static const struct intel_device_info intel_i965gm_info = { |
@@ -210,18 +219,20 @@ static const struct intel_device_info intel_i965gm_info = { | |||
210 | .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, | 219 | .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, |
211 | .has_overlay = 1, | 220 | .has_overlay = 1, |
212 | .supports_tv = 1, | 221 | .supports_tv = 1, |
222 | .ring_mask = RENDER_RING, | ||
213 | }; | 223 | }; |
214 | 224 | ||
215 | static const struct intel_device_info intel_g33_info = { | 225 | static const struct intel_device_info intel_g33_info = { |
216 | .gen = 3, .is_g33 = 1, .num_pipes = 2, | 226 | .gen = 3, .is_g33 = 1, .num_pipes = 2, |
217 | .need_gfx_hws = 1, .has_hotplug = 1, | 227 | .need_gfx_hws = 1, .has_hotplug = 1, |
218 | .has_overlay = 1, | 228 | .has_overlay = 1, |
229 | .ring_mask = RENDER_RING, | ||
219 | }; | 230 | }; |
220 | 231 | ||
221 | static const struct intel_device_info intel_g45_info = { | 232 | static const struct intel_device_info intel_g45_info = { |
222 | .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2, | 233 | .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2, |
223 | .has_pipe_cxsr = 1, .has_hotplug = 1, | 234 | .has_pipe_cxsr = 1, .has_hotplug = 1, |
224 | .has_bsd_ring = 1, | 235 | .ring_mask = RENDER_RING | BSD_RING, |
225 | }; | 236 | }; |
226 | 237 | ||
227 | static const struct intel_device_info intel_gm45_info = { | 238 | static const struct intel_device_info intel_gm45_info = { |
@@ -229,7 +240,7 @@ static const struct intel_device_info intel_gm45_info = { | |||
229 | .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, | 240 | .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, |
230 | .has_pipe_cxsr = 1, .has_hotplug = 1, | 241 | .has_pipe_cxsr = 1, .has_hotplug = 1, |
231 | .supports_tv = 1, | 242 | .supports_tv = 1, |
232 | .has_bsd_ring = 1, | 243 | .ring_mask = RENDER_RING | BSD_RING, |
233 | }; | 244 | }; |
234 | 245 | ||
235 | static const struct intel_device_info intel_pineview_info = { | 246 | static const struct intel_device_info intel_pineview_info = { |
@@ -241,42 +252,36 @@ static const struct intel_device_info intel_pineview_info = { | |||
241 | static const struct intel_device_info intel_ironlake_d_info = { | 252 | static const struct intel_device_info intel_ironlake_d_info = { |
242 | .gen = 5, .num_pipes = 2, | 253 | .gen = 5, .num_pipes = 2, |
243 | .need_gfx_hws = 1, .has_hotplug = 1, | 254 | .need_gfx_hws = 1, .has_hotplug = 1, |
244 | .has_bsd_ring = 1, | 255 | .ring_mask = RENDER_RING | BSD_RING, |
245 | }; | 256 | }; |
246 | 257 | ||
247 | static const struct intel_device_info intel_ironlake_m_info = { | 258 | static const struct intel_device_info intel_ironlake_m_info = { |
248 | .gen = 5, .is_mobile = 1, .num_pipes = 2, | 259 | .gen = 5, .is_mobile = 1, .num_pipes = 2, |
249 | .need_gfx_hws = 1, .has_hotplug = 1, | 260 | .need_gfx_hws = 1, .has_hotplug = 1, |
250 | .has_fbc = 1, | 261 | .has_fbc = 1, |
251 | .has_bsd_ring = 1, | 262 | .ring_mask = RENDER_RING | BSD_RING, |
252 | }; | 263 | }; |
253 | 264 | ||
254 | static const struct intel_device_info intel_sandybridge_d_info = { | 265 | static const struct intel_device_info intel_sandybridge_d_info = { |
255 | .gen = 6, .num_pipes = 2, | 266 | .gen = 6, .num_pipes = 2, |
256 | .need_gfx_hws = 1, .has_hotplug = 1, | 267 | .need_gfx_hws = 1, .has_hotplug = 1, |
257 | .has_bsd_ring = 1, | 268 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING, |
258 | .has_blt_ring = 1, | ||
259 | .has_llc = 1, | 269 | .has_llc = 1, |
260 | .has_force_wake = 1, | ||
261 | }; | 270 | }; |
262 | 271 | ||
263 | static const struct intel_device_info intel_sandybridge_m_info = { | 272 | static const struct intel_device_info intel_sandybridge_m_info = { |
264 | .gen = 6, .is_mobile = 1, .num_pipes = 2, | 273 | .gen = 6, .is_mobile = 1, .num_pipes = 2, |
265 | .need_gfx_hws = 1, .has_hotplug = 1, | 274 | .need_gfx_hws = 1, .has_hotplug = 1, |
266 | .has_fbc = 1, | 275 | .has_fbc = 1, |
267 | .has_bsd_ring = 1, | 276 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING, |
268 | .has_blt_ring = 1, | ||
269 | .has_llc = 1, | 277 | .has_llc = 1, |
270 | .has_force_wake = 1, | ||
271 | }; | 278 | }; |
272 | 279 | ||
273 | #define GEN7_FEATURES \ | 280 | #define GEN7_FEATURES \ |
274 | .gen = 7, .num_pipes = 3, \ | 281 | .gen = 7, .num_pipes = 3, \ |
275 | .need_gfx_hws = 1, .has_hotplug = 1, \ | 282 | .need_gfx_hws = 1, .has_hotplug = 1, \ |
276 | .has_bsd_ring = 1, \ | 283 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ |
277 | .has_blt_ring = 1, \ | 284 | .has_llc = 1 |
278 | .has_llc = 1, \ | ||
279 | .has_force_wake = 1 | ||
280 | 285 | ||
281 | static const struct intel_device_info intel_ivybridge_d_info = { | 286 | static const struct intel_device_info intel_ivybridge_d_info = { |
282 | GEN7_FEATURES, | 287 | GEN7_FEATURES, |
@@ -318,7 +323,7 @@ static const struct intel_device_info intel_haswell_d_info = { | |||
318 | .is_haswell = 1, | 323 | .is_haswell = 1, |
319 | .has_ddi = 1, | 324 | .has_ddi = 1, |
320 | .has_fpga_dbg = 1, | 325 | .has_fpga_dbg = 1, |
321 | .has_vebox_ring = 1, | 326 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, |
322 | }; | 327 | }; |
323 | 328 | ||
324 | static const struct intel_device_info intel_haswell_m_info = { | 329 | static const struct intel_device_info intel_haswell_m_info = { |
@@ -328,7 +333,7 @@ static const struct intel_device_info intel_haswell_m_info = { | |||
328 | .has_ddi = 1, | 333 | .has_ddi = 1, |
329 | .has_fpga_dbg = 1, | 334 | .has_fpga_dbg = 1, |
330 | .has_fbc = 1, | 335 | .has_fbc = 1, |
331 | .has_vebox_ring = 1, | 336 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, |
332 | }; | 337 | }; |
333 | 338 | ||
334 | /* | 339 | /* |
@@ -482,9 +487,7 @@ static int i915_drm_freeze(struct drm_device *dev) | |||
482 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 487 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
483 | int error; | 488 | int error; |
484 | 489 | ||
485 | mutex_lock(&dev->struct_mutex); | 490 | error = i915_gem_suspend(dev); |
486 | error = i915_gem_idle(dev); | ||
487 | mutex_unlock(&dev->struct_mutex); | ||
488 | if (error) { | 491 | if (error) { |
489 | dev_err(&dev->pdev->dev, | 492 | dev_err(&dev->pdev->dev, |
490 | "GEM idle failed, resume might fail\n"); | 493 | "GEM idle failed, resume might fail\n"); |
@@ -747,30 +750,17 @@ int i915_reset(struct drm_device *dev) | |||
747 | */ | 750 | */ |
748 | if (drm_core_check_feature(dev, DRIVER_MODESET) || | 751 | if (drm_core_check_feature(dev, DRIVER_MODESET) || |
749 | !dev_priv->ums.mm_suspended) { | 752 | !dev_priv->ums.mm_suspended) { |
750 | struct intel_ring_buffer *ring; | 753 | bool hw_contexts_disabled = dev_priv->hw_contexts_disabled; |
751 | int i; | ||
752 | |||
753 | dev_priv->ums.mm_suspended = 0; | 754 | dev_priv->ums.mm_suspended = 0; |
754 | 755 | ||
755 | i915_gem_init_swizzling(dev); | 756 | ret = i915_gem_init_hw(dev); |
756 | 757 | if (!hw_contexts_disabled && dev_priv->hw_contexts_disabled) | |
757 | for_each_ring(ring, dev_priv, i) | 758 | DRM_ERROR("HW contexts didn't survive reset\n"); |
758 | ring->init(ring); | ||
759 | |||
760 | i915_gem_context_init(dev); | ||
761 | if (dev_priv->mm.aliasing_ppgtt) { | ||
762 | ret = dev_priv->mm.aliasing_ppgtt->enable(dev); | ||
763 | if (ret) | ||
764 | i915_gem_cleanup_aliasing_ppgtt(dev); | ||
765 | } | ||
766 | |||
767 | /* | ||
768 | * It would make sense to re-init all the other hw state, at | ||
769 | * least the rps/rc6/emon init done within modeset_init_hw. For | ||
770 | * some unknown reason, this blows up my ilk, so don't. | ||
771 | */ | ||
772 | |||
773 | mutex_unlock(&dev->struct_mutex); | 759 | mutex_unlock(&dev->struct_mutex); |
760 | if (ret) { | ||
761 | DRM_ERROR("Failed hw init on reset %d\n", ret); | ||
762 | return ret; | ||
763 | } | ||
774 | 764 | ||
775 | drm_irq_uninstall(dev); | 765 | drm_irq_uninstall(dev); |
776 | drm_irq_install(dev); | 766 | drm_irq_install(dev); |
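
The reset path above snapshots hw_contexts_disabled before re-initialising the hardware and complains if the flag flipped, i.e. if HW contexts did not survive the reset. A tiny stand-alone C sketch of that snapshot-and-compare pattern; everything here (struct priv, reinit_hw) is illustrative, not the driver's code:

#include <stdio.h>
#include <stdbool.h>

struct priv {
	bool hw_contexts_disabled;
};

/* Pretend hardware re-init; may disable contexts if they failed to restore. */
static int reinit_hw(struct priv *p, bool contexts_survive)
{
	if (!contexts_survive)
		p->hw_contexts_disabled = true;
	return 0;
}

static int handle_reset(struct priv *p, bool contexts_survive)
{
	bool was_disabled = p->hw_contexts_disabled;	/* snapshot first */
	int ret = reinit_hw(p, contexts_survive);

	/* Only warn when the re-init itself regressed the feature. */
	if (!was_disabled && p->hw_contexts_disabled)
		fprintf(stderr, "HW contexts didn't survive reset\n");

	return ret;
}

int main(void)
{
	struct priv p = { .hw_contexts_disabled = false };

	handle_reset(&p, true);		/* quiet */
	handle_reset(&p, false);	/* warns once */
	return 0;
}
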
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6a5b7ab0c3fa..2ea33eebf01c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -379,7 +379,8 @@ struct drm_i915_display_funcs { | |||
379 | void (*crtc_disable)(struct drm_crtc *crtc); | 379 | void (*crtc_disable)(struct drm_crtc *crtc); |
380 | void (*off)(struct drm_crtc *crtc); | 380 | void (*off)(struct drm_crtc *crtc); |
381 | void (*write_eld)(struct drm_connector *connector, | 381 | void (*write_eld)(struct drm_connector *connector, |
382 | struct drm_crtc *crtc); | 382 | struct drm_crtc *crtc, |
383 | struct drm_display_mode *mode); | ||
383 | void (*fdi_link_train)(struct drm_crtc *crtc); | 384 | void (*fdi_link_train)(struct drm_crtc *crtc); |
384 | void (*init_clock_gating)(struct drm_device *dev); | 385 | void (*init_clock_gating)(struct drm_device *dev); |
385 | int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, | 386 | int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, |
@@ -399,6 +400,20 @@ struct drm_i915_display_funcs { | |||
399 | struct intel_uncore_funcs { | 400 | struct intel_uncore_funcs { |
400 | void (*force_wake_get)(struct drm_i915_private *dev_priv); | 401 | void (*force_wake_get)(struct drm_i915_private *dev_priv); |
401 | void (*force_wake_put)(struct drm_i915_private *dev_priv); | 402 | void (*force_wake_put)(struct drm_i915_private *dev_priv); |
403 | |||
404 | uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace); | ||
405 | uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace); | ||
406 | uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace); | ||
407 | uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace); | ||
408 | |||
409 | void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset, | ||
410 | uint8_t val, bool trace); | ||
411 | void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset, | ||
412 | uint16_t val, bool trace); | ||
413 | void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset, | ||
414 | uint32_t val, bool trace); | ||
415 | void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset, | ||
416 | uint64_t val, bool trace); | ||
402 | }; | 417 | }; |
403 | 418 | ||
404 | struct intel_uncore { | 419 | struct intel_uncore { |
@@ -427,7 +442,6 @@ struct intel_uncore { | |||
427 | func(is_valleyview) sep \ | 442 | func(is_valleyview) sep \ |
428 | func(is_haswell) sep \ | 443 | func(is_haswell) sep \ |
429 | func(is_preliminary) sep \ | 444 | func(is_preliminary) sep \ |
430 | func(has_force_wake) sep \ | ||
431 | func(has_fbc) sep \ | 445 | func(has_fbc) sep \ |
432 | func(has_pipe_cxsr) sep \ | 446 | func(has_pipe_cxsr) sep \ |
433 | func(has_hotplug) sep \ | 447 | func(has_hotplug) sep \ |
@@ -435,9 +449,6 @@ struct intel_uncore { | |||
435 | func(has_overlay) sep \ | 449 | func(has_overlay) sep \ |
436 | func(overlay_needs_physical) sep \ | 450 | func(overlay_needs_physical) sep \ |
437 | func(supports_tv) sep \ | 451 | func(supports_tv) sep \ |
438 | func(has_bsd_ring) sep \ | ||
439 | func(has_blt_ring) sep \ | ||
440 | func(has_vebox_ring) sep \ | ||
441 | func(has_llc) sep \ | 452 | func(has_llc) sep \ |
442 | func(has_ddi) sep \ | 453 | func(has_ddi) sep \ |
443 | func(has_fpga_dbg) | 454 | func(has_fpga_dbg) |
@@ -449,6 +460,7 @@ struct intel_device_info { | |||
449 | u32 display_mmio_offset; | 460 | u32 display_mmio_offset; |
450 | u8 num_pipes:3; | 461 | u8 num_pipes:3; |
451 | u8 gen; | 462 | u8 gen; |
463 | u8 ring_mask; /* Rings supported by the HW */ | ||
452 | DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); | 464 | DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); |
453 | }; | 465 | }; |
454 | 466 | ||
@@ -849,6 +861,7 @@ struct intel_gen6_power_mgmt { | |||
849 | int last_adj; | 861 | int last_adj; |
850 | enum { LOW_POWER, BETWEEN, HIGH_POWER } power; | 862 | enum { LOW_POWER, BETWEEN, HIGH_POWER } power; |
851 | 863 | ||
864 | bool enabled; | ||
852 | struct delayed_work delayed_resume_work; | 865 | struct delayed_work delayed_resume_work; |
853 | 866 | ||
854 | /* | 867 | /* |
@@ -1127,6 +1140,15 @@ struct intel_wm_level { | |||
1127 | uint32_t fbc_val; | 1140 | uint32_t fbc_val; |
1128 | }; | 1141 | }; |
1129 | 1142 | ||
1143 | struct hsw_wm_values { | ||
1144 | uint32_t wm_pipe[3]; | ||
1145 | uint32_t wm_lp[3]; | ||
1146 | uint32_t wm_lp_spr[3]; | ||
1147 | uint32_t wm_linetime[3]; | ||
1148 | bool enable_fbc_wm; | ||
1149 | enum intel_ddb_partitioning partitioning; | ||
1150 | }; | ||
1151 | |||
1130 | /* | 1152 | /* |
1131 | * This struct tracks the state needed for the Package C8+ feature. | 1153 | * This struct tracks the state needed for the Package C8+ feature. |
1132 | * | 1154 | * |
@@ -1196,6 +1218,29 @@ struct i915_package_c8 { | |||
1196 | } regsave; | 1218 | } regsave; |
1197 | }; | 1219 | }; |
1198 | 1220 | ||
1221 | enum intel_pipe_crc_source { | ||
1222 | INTEL_PIPE_CRC_SOURCE_NONE, | ||
1223 | INTEL_PIPE_CRC_SOURCE_PLANE1, | ||
1224 | INTEL_PIPE_CRC_SOURCE_PLANE2, | ||
1225 | INTEL_PIPE_CRC_SOURCE_PF, | ||
1226 | INTEL_PIPE_CRC_SOURCE_PIPE, | ||
1227 | INTEL_PIPE_CRC_SOURCE_MAX, | ||
1228 | }; | ||
1229 | |||
1230 | struct intel_pipe_crc_entry { | ||
1231 | uint32_t frame; | ||
1232 | uint32_t crc[5]; | ||
1233 | }; | ||
1234 | |||
1235 | #define INTEL_PIPE_CRC_ENTRIES_NR 128 | ||
1236 | struct intel_pipe_crc { | ||
1237 | atomic_t available; /* exclusive access to the device */ | ||
1238 | struct intel_pipe_crc_entry *entries; | ||
1239 | enum intel_pipe_crc_source source; | ||
1240 | atomic_t head, tail; | ||
1241 | wait_queue_head_t wq; | ||
1242 | }; | ||
1243 | |||
1199 | typedef struct drm_i915_private { | 1244 | typedef struct drm_i915_private { |
1200 | struct drm_device *dev; | 1245 | struct drm_device *dev; |
1201 | struct kmem_cache *slab; | 1246 | struct kmem_cache *slab; |
@@ -1354,8 +1399,10 @@ typedef struct drm_i915_private { | |||
1354 | 1399 | ||
1355 | struct drm_i915_gem_object *vlv_pctx; | 1400 | struct drm_i915_gem_object *vlv_pctx; |
1356 | 1401 | ||
1402 | #ifdef CONFIG_DRM_I915_FBDEV | ||
1357 | /* list of fbdev register on this device */ | 1403 | /* list of fbdev register on this device */ |
1358 | struct intel_fbdev *fbdev; | 1404 | struct intel_fbdev *fbdev; |
1405 | #endif | ||
1359 | 1406 | ||
1360 | /* | 1407 | /* |
1361 | * The console may be contended at resume, but we don't | 1408 | * The console may be contended at resume, but we don't |
@@ -1386,6 +1433,9 @@ typedef struct drm_i915_private { | |||
1386 | uint16_t spr_latency[5]; | 1433 | uint16_t spr_latency[5]; |
1387 | /* cursor */ | 1434 | /* cursor */ |
1388 | uint16_t cur_latency[5]; | 1435 | uint16_t cur_latency[5]; |
1436 | |||
1437 | /* current hardware state */ | ||
1438 | struct hsw_wm_values hw; | ||
1389 | } wm; | 1439 | } wm; |
1390 | 1440 | ||
1391 | struct i915_package_c8 pc8; | 1441 | struct i915_package_c8 pc8; |
@@ -1395,6 +1445,10 @@ typedef struct drm_i915_private { | |||
1395 | struct i915_dri1_state dri1; | 1445 | struct i915_dri1_state dri1; |
1396 | /* Old ums support infrastructure, same warning applies. */ | 1446 | /* Old ums support infrastructure, same warning applies. */ |
1397 | struct i915_ums_state ums; | 1447 | struct i915_ums_state ums; |
1448 | |||
1449 | #ifdef CONFIG_DEBUG_FS | ||
1450 | struct intel_pipe_crc pipe_crc[I915_MAX_PIPES]; | ||
1451 | #endif | ||
1398 | } drm_i915_private_t; | 1452 | } drm_i915_private_t; |
1399 | 1453 | ||
1400 | static inline struct drm_i915_private *to_i915(const struct drm_device *dev) | 1454 | static inline struct drm_i915_private *to_i915(const struct drm_device *dev) |
@@ -1545,11 +1599,14 @@ struct drm_i915_gem_object { | |||
1545 | /** Current tiling stride for the object, if it's tiled. */ | 1599 | /** Current tiling stride for the object, if it's tiled. */ |
1546 | uint32_t stride; | 1600 | uint32_t stride; |
1547 | 1601 | ||
1602 | /** References from framebuffers, locks out tiling changes. */ | ||
1603 | unsigned long framebuffer_references; | ||
1604 | |||
1548 | /** Record of address bit 17 of each page at last unbind. */ | 1605 | /** Record of address bit 17 of each page at last unbind. */ |
1549 | unsigned long *bit_17; | 1606 | unsigned long *bit_17; |
1550 | 1607 | ||
1551 | /** User space pin count and filp owning the pin */ | 1608 | /** User space pin count and filp owning the pin */ |
1552 | uint32_t user_pin_count; | 1609 | unsigned long user_pin_count; |
1553 | struct drm_file *pin_filp; | 1610 | struct drm_file *pin_filp; |
1554 | 1611 | ||
1555 | /** for phy allocated objects */ | 1612 | /** for phy allocated objects */ |
@@ -1663,9 +1720,13 @@ struct drm_i915_file_private { | |||
1663 | #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) | 1720 | #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) |
1664 | #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) | 1721 | #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) |
1665 | 1722 | ||
1666 | #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) | 1723 | #define RENDER_RING (1<<RCS) |
1667 | #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) | 1724 | #define BSD_RING (1<<VCS) |
1668 | #define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring) | 1725 | #define BLT_RING (1<<BCS) |
1726 | #define VEBOX_RING (1<<VECS) | ||
1727 | #define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING) | ||
1728 | #define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING) | ||
1729 | #define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) | ||
1669 | #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) | 1730 | #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) |
1670 | #define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size) | 1731 | #define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size) |
1671 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) | 1732 | #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) |
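
For readers following the has_*_ring → ring_mask conversion in the hunks above, here is a small stand-alone C sketch (user-space, not the kernel code itself) of how one per-device bitmask replaces the separate boolean feature flags; the RCS/VCS/BCS/VECS names mirror the macros above, the rest is illustrative:

#include <stdio.h>
#include <stdint.h>

/* Ring indices and mask bits, mirroring RENDER_RING = 1 << RCS etc. */
enum ring_id { RCS, VCS, BCS, VECS, NUM_RINGS };
#define RENDER_RING (1u << RCS)
#define BSD_RING    (1u << VCS)
#define BLT_RING    (1u << BCS)
#define VEBOX_RING  (1u << VECS)

struct device_info {
	uint8_t gen;
	uint8_t ring_mask;	/* rings supported by the hardware */
};

/* One mask test replaces three separate has_*_ring booleans. */
static int has_ring(const struct device_info *info, unsigned int ring_bit)
{
	return (info->ring_mask & ring_bit) != 0;
}

int main(void)
{
	/* Roughly what the Sandybridge entries in the table encode. */
	struct device_info snb = {
		.gen = 6,
		.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	};

	printf("BSD: %d BLT: %d VEBOX: %d\n",
	       has_ring(&snb, BSD_RING),
	       has_ring(&snb, BLT_RING),
	       has_ring(&snb, VEBOX_RING));
	return 0;
}
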
@@ -1715,8 +1776,6 @@ struct drm_i915_file_private { | |||
1715 | #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) | 1776 | #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) |
1716 | #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) | 1777 | #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) |
1717 | 1778 | ||
1718 | #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) | ||
1719 | |||
1720 | /* DPF == dynamic parity feature */ | 1779 | /* DPF == dynamic parity feature */ |
1721 | #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) | 1780 | #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) |
1722 | #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev)) | 1781 | #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev)) |
@@ -1983,7 +2042,7 @@ int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice); | |||
1983 | void i915_gem_init_swizzling(struct drm_device *dev); | 2042 | void i915_gem_init_swizzling(struct drm_device *dev); |
1984 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); | 2043 | void i915_gem_cleanup_ringbuffer(struct drm_device *dev); |
1985 | int __must_check i915_gpu_idle(struct drm_device *dev); | 2044 | int __must_check i915_gpu_idle(struct drm_device *dev); |
1986 | int __must_check i915_gem_idle(struct drm_device *dev); | 2045 | int __must_check i915_gem_suspend(struct drm_device *dev); |
1987 | int __i915_add_request(struct intel_ring_buffer *ring, | 2046 | int __i915_add_request(struct intel_ring_buffer *ring, |
1988 | struct drm_file *file, | 2047 | struct drm_file *file, |
1989 | struct drm_i915_gem_object *batch_obj, | 2048 | struct drm_i915_gem_object *batch_obj, |
@@ -2181,6 +2240,11 @@ int i915_verify_lists(struct drm_device *dev); | |||
2181 | /* i915_debugfs.c */ | 2240 | /* i915_debugfs.c */ |
2182 | int i915_debugfs_init(struct drm_minor *minor); | 2241 | int i915_debugfs_init(struct drm_minor *minor); |
2183 | void i915_debugfs_cleanup(struct drm_minor *minor); | 2242 | void i915_debugfs_cleanup(struct drm_minor *minor); |
2243 | #ifdef CONFIG_DEBUG_FS | ||
2244 | void intel_display_crc_init(struct drm_device *dev); | ||
2245 | #else | ||
2246 | static inline void intel_display_crc_init(struct drm_device *dev) {} | ||
2247 | #endif | ||
2184 | 2248 | ||
2185 | /* i915_gpu_error.c */ | 2249 | /* i915_gpu_error.c */ |
2186 | __printf(2, 3) | 2250 | __printf(2, 3) |
@@ -2337,37 +2401,21 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, | |||
2337 | int vlv_gpu_freq(int ddr_freq, int val); | 2401 | int vlv_gpu_freq(int ddr_freq, int val); |
2338 | int vlv_freq_opcode(int ddr_freq, int val); | 2402 | int vlv_freq_opcode(int ddr_freq, int val); |
2339 | 2403 | ||
2340 | #define __i915_read(x) \ | 2404 | #define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true) |
2341 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace); | 2405 | #define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true) |
2342 | __i915_read(8) | 2406 | |
2343 | __i915_read(16) | 2407 | #define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true) |
2344 | __i915_read(32) | 2408 | #define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true) |
2345 | __i915_read(64) | 2409 | #define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false) |
2346 | #undef __i915_read | 2410 | #define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false) |
2347 | 2411 | ||
2348 | #define __i915_write(x) \ | 2412 | #define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true) |
2349 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace); | 2413 | #define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true) |
2350 | __i915_write(8) | 2414 | #define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false) |
2351 | __i915_write(16) | 2415 | #define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false) |
2352 | __i915_write(32) | 2416 | |
2353 | __i915_write(64) | 2417 | #define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true) |
2354 | #undef __i915_write | 2418 | #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) |
2355 | |||
2356 | #define I915_READ8(reg) i915_read8(dev_priv, (reg), true) | ||
2357 | #define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val), true) | ||
2358 | |||
2359 | #define I915_READ16(reg) i915_read16(dev_priv, (reg), true) | ||
2360 | #define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val), true) | ||
2361 | #define I915_READ16_NOTRACE(reg) i915_read16(dev_priv, (reg), false) | ||
2362 | #define I915_WRITE16_NOTRACE(reg, val) i915_write16(dev_priv, (reg), (val), false) | ||
2363 | |||
2364 | #define I915_READ(reg) i915_read32(dev_priv, (reg), true) | ||
2365 | #define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val), true) | ||
2366 | #define I915_READ_NOTRACE(reg) i915_read32(dev_priv, (reg), false) | ||
2367 | #define I915_WRITE_NOTRACE(reg, val) i915_write32(dev_priv, (reg), (val), false) | ||
2368 | |||
2369 | #define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val), true) | ||
2370 | #define I915_READ64(reg) i915_read64(dev_priv, (reg), true) | ||
2371 | 2419 | ||
2372 | #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) | 2420 | #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) |
2373 | #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) | 2421 | #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) |
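
The hunk above reroutes the I915_READ*/I915_WRITE* macros through per-size function pointers in intel_uncore_funcs instead of the old i915_read##x/i915_write##x helpers. A minimal user-space sketch of that vtable-dispatch pattern follows; the backend names (readl_plain, writel_plain) and MY_READ/MY_WRITE macros are invented for illustration only:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct dev_priv;

/* Per-size accessors, analogous to intel_uncore_funcs.mmio_readl/mmio_writel. */
struct uncore_funcs {
	uint32_t (*mmio_readl)(struct dev_priv *dp, uint32_t off, bool trace);
	void (*mmio_writel)(struct dev_priv *dp, uint32_t off,
			    uint32_t val, bool trace);
};

struct dev_priv {
	struct uncore_funcs funcs;
	uint32_t fake_regs[64];		/* stand-in for the MMIO BAR */
};

/* Illustrative backend; a real driver would install one per platform. */
static uint32_t readl_plain(struct dev_priv *dp, uint32_t off, bool trace)
{
	if (trace)
		printf("read  [%02x]\n", off);
	return dp->fake_regs[off / 4];
}

static void writel_plain(struct dev_priv *dp, uint32_t off,
			 uint32_t val, bool trace)
{
	if (trace)
		printf("write [%02x] = %08x\n", off, val);
	dp->fake_regs[off / 4] = val;
}

/* Macro dispatch, same shape as the new I915_READ/I915_WRITE. */
#define MY_READ(dp, reg)       (dp)->funcs.mmio_readl((dp), (reg), true)
#define MY_WRITE(dp, reg, val) (dp)->funcs.mmio_writel((dp), (reg), (val), true)

int main(void)
{
	struct dev_priv dp = { .funcs = { readl_plain, writel_plain } };

	MY_WRITE(&dp, 0x10, 0xdeadbeef);
	printf("-> %08x\n", MY_READ(&dp, 0x10));
	return 0;
}
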
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 13c885d66383..34df59b660f8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -64,8 +64,8 @@ static unsigned long i915_gem_inactive_count(struct shrinker *shrinker, | |||
64 | struct shrink_control *sc); | 64 | struct shrink_control *sc); |
65 | static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker, | 65 | static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker, |
66 | struct shrink_control *sc); | 66 | struct shrink_control *sc); |
67 | static long i915_gem_purge(struct drm_i915_private *dev_priv, long target); | 67 | static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target); |
68 | static long i915_gem_shrink_all(struct drm_i915_private *dev_priv); | 68 | static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); |
69 | static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); | 69 | static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); |
70 | 70 | ||
71 | static bool cpu_cache_is_coherent(struct drm_device *dev, | 71 | static bool cpu_cache_is_coherent(struct drm_device *dev, |
@@ -1082,7 +1082,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, | |||
1082 | mod_timer(&timer, expire); | 1082 | mod_timer(&timer, expire); |
1083 | } | 1083 | } |
1084 | 1084 | ||
1085 | schedule(); | 1085 | io_schedule(); |
1086 | 1086 | ||
1087 | if (timeout) | 1087 | if (timeout) |
1088 | timeout_jiffies = expire - jiffies; | 1088 | timeout_jiffies = expire - jiffies; |
@@ -1728,13 +1728,13 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) | |||
1728 | return 0; | 1728 | return 0; |
1729 | } | 1729 | } |
1730 | 1730 | ||
1731 | static long | 1731 | static unsigned long |
1732 | __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, | 1732 | __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, |
1733 | bool purgeable_only) | 1733 | bool purgeable_only) |
1734 | { | 1734 | { |
1735 | struct list_head still_bound_list; | 1735 | struct list_head still_bound_list; |
1736 | struct drm_i915_gem_object *obj, *next; | 1736 | struct drm_i915_gem_object *obj, *next; |
1737 | long count = 0; | 1737 | unsigned long count = 0; |
1738 | 1738 | ||
1739 | list_for_each_entry_safe(obj, next, | 1739 | list_for_each_entry_safe(obj, next, |
1740 | &dev_priv->mm.unbound_list, | 1740 | &dev_priv->mm.unbound_list, |
@@ -1800,13 +1800,13 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, | |||
1800 | return count; | 1800 | return count; |
1801 | } | 1801 | } |
1802 | 1802 | ||
1803 | static long | 1803 | static unsigned long |
1804 | i915_gem_purge(struct drm_i915_private *dev_priv, long target) | 1804 | i915_gem_purge(struct drm_i915_private *dev_priv, long target) |
1805 | { | 1805 | { |
1806 | return __i915_gem_shrink(dev_priv, target, true); | 1806 | return __i915_gem_shrink(dev_priv, target, true); |
1807 | } | 1807 | } |
1808 | 1808 | ||
1809 | static long | 1809 | static unsigned long |
1810 | i915_gem_shrink_all(struct drm_i915_private *dev_priv) | 1810 | i915_gem_shrink_all(struct drm_i915_private *dev_priv) |
1811 | { | 1811 | { |
1812 | struct drm_i915_gem_object *obj, *next; | 1812 | struct drm_i915_gem_object *obj, *next; |
@@ -1816,9 +1816,8 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv) | |||
1816 | 1816 | ||
1817 | list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, | 1817 | list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, |
1818 | global_list) { | 1818 | global_list) { |
1819 | if (obj->pages_pin_count == 0) | 1819 | if (i915_gem_object_put_pages(obj) == 0) |
1820 | freed += obj->base.size >> PAGE_SHIFT; | 1820 | freed += obj->base.size >> PAGE_SHIFT; |
1821 | i915_gem_object_put_pages(obj); | ||
1822 | } | 1821 | } |
1823 | return freed; | 1822 | return freed; |
1824 | } | 1823 | } |
@@ -1903,6 +1902,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) | |||
1903 | sg->length += PAGE_SIZE; | 1902 | sg->length += PAGE_SIZE; |
1904 | } | 1903 | } |
1905 | last_pfn = page_to_pfn(page); | 1904 | last_pfn = page_to_pfn(page); |
1905 | |||
1906 | /* Check that the i965g/gm workaround works. */ | ||
1907 | WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL)); | ||
1906 | } | 1908 | } |
1907 | #ifdef CONFIG_SWIOTLB | 1909 | #ifdef CONFIG_SWIOTLB |
1908 | if (!swiotlb_nr_tbl()) | 1910 | if (!swiotlb_nr_tbl()) |
@@ -2404,6 +2406,8 @@ void i915_gem_reset(struct drm_device *dev) | |||
2404 | for_each_ring(ring, dev_priv, i) | 2406 | for_each_ring(ring, dev_priv, i) |
2405 | i915_gem_reset_ring_lists(dev_priv, ring); | 2407 | i915_gem_reset_ring_lists(dev_priv, ring); |
2406 | 2408 | ||
2409 | i915_gem_cleanup_ringbuffer(dev); | ||
2410 | |||
2407 | i915_gem_restore_fences(dev); | 2411 | i915_gem_restore_fences(dev); |
2408 | } | 2412 | } |
2409 | 2413 | ||
@@ -3927,6 +3931,11 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, | |||
3927 | goto out; | 3931 | goto out; |
3928 | } | 3932 | } |
3929 | 3933 | ||
3934 | if (obj->user_pin_count == ULONG_MAX) { | ||
3935 | ret = -EBUSY; | ||
3936 | goto out; | ||
3937 | } | ||
3938 | |||
3930 | if (obj->user_pin_count == 0) { | 3939 | if (obj->user_pin_count == 0) { |
3931 | ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false); | 3940 | ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false); |
3932 | if (ret) | 3941 | if (ret) |
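
The added ULONG_MAX check above guards the (now unsigned long) user_pin_count against wrap-around before it is incremented. A tiny stand-alone sketch of that guard-before-increment pattern:

#include <stdio.h>
#include <limits.h>
#include <errno.h>

struct object {
	unsigned long user_pin_count;
};

/* Refuse to increment once the counter would wrap, as the ioctl now does. */
static int pin_object(struct object *obj)
{
	if (obj->user_pin_count == ULONG_MAX)
		return -EBUSY;

	obj->user_pin_count++;
	return 0;
}

int main(void)
{
	struct object obj = { .user_pin_count = ULONG_MAX - 1 };

	printf("first pin:  %d\n", pin_object(&obj));	/* 0 */
	printf("second pin: %d\n", pin_object(&obj));	/* -EBUSY */
	return 0;
}
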
@@ -4261,17 +4270,18 @@ void i915_gem_vma_destroy(struct i915_vma *vma) | |||
4261 | } | 4270 | } |
4262 | 4271 | ||
4263 | int | 4272 | int |
4264 | i915_gem_idle(struct drm_device *dev) | 4273 | i915_gem_suspend(struct drm_device *dev) |
4265 | { | 4274 | { |
4266 | drm_i915_private_t *dev_priv = dev->dev_private; | 4275 | drm_i915_private_t *dev_priv = dev->dev_private; |
4267 | int ret; | 4276 | int ret = 0; |
4268 | 4277 | ||
4278 | mutex_lock(&dev->struct_mutex); | ||
4269 | if (dev_priv->ums.mm_suspended) | 4279 | if (dev_priv->ums.mm_suspended) |
4270 | return 0; | 4280 | goto err; |
4271 | 4281 | ||
4272 | ret = i915_gpu_idle(dev); | 4282 | ret = i915_gpu_idle(dev); |
4273 | if (ret) | 4283 | if (ret) |
4274 | return ret; | 4284 | goto err; |
4275 | 4285 | ||
4276 | i915_gem_retire_requests(dev); | 4286 | i915_gem_retire_requests(dev); |
4277 | 4287 | ||
@@ -4279,16 +4289,26 @@ i915_gem_idle(struct drm_device *dev) | |||
4279 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | 4289 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
4280 | i915_gem_evict_everything(dev); | 4290 | i915_gem_evict_everything(dev); |
4281 | 4291 | ||
4282 | del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); | ||
4283 | |||
4284 | i915_kernel_lost_context(dev); | 4292 | i915_kernel_lost_context(dev); |
4285 | i915_gem_cleanup_ringbuffer(dev); | 4293 | i915_gem_cleanup_ringbuffer(dev); |
4286 | 4294 | ||
4287 | /* Cancel the retire work handler, which should be idle now. */ | 4295 | /* Hack! Don't let anybody do execbuf while we don't control the chip. |
4296 | * We need to replace this with a semaphore, or something. | ||
4297 | * And not confound ums.mm_suspended! | ||
4298 | */ | ||
4299 | dev_priv->ums.mm_suspended = !drm_core_check_feature(dev, | ||
4300 | DRIVER_MODESET); | ||
4301 | mutex_unlock(&dev->struct_mutex); | ||
4302 | |||
4303 | del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); | ||
4288 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); | 4304 | cancel_delayed_work_sync(&dev_priv->mm.retire_work); |
4289 | cancel_delayed_work_sync(&dev_priv->mm.idle_work); | 4305 | cancel_delayed_work_sync(&dev_priv->mm.idle_work); |
4290 | 4306 | ||
4291 | return 0; | 4307 | return 0; |
4308 | |||
4309 | err: | ||
4310 | mutex_unlock(&dev->struct_mutex); | ||
4311 | return ret; | ||
4292 | } | 4312 | } |
4293 | 4313 | ||
4294 | int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice) | 4314 | int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice) |
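
i915_gem_suspend now takes struct_mutex itself and funnels every early failure through a single unlock path (the err: label above), with the timer/work cancellation kept outside the lock. A user-space sketch of that single-exit locking pattern, using a pthread mutex in place of struct_mutex and an invented already_suspended flag:

#include <stdio.h>
#include <pthread.h>
#include <errno.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int already_suspended;

/* Do the locked work; every early exit goes through the same unlock. */
static int do_suspend(int simulate_error)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (already_suspended)
		goto err;			/* ret == 0, nothing to do */

	if (simulate_error) {
		ret = -EIO;			/* e.g. failing to idle the GPU */
		goto err;
	}

	already_suspended = 1;
	pthread_mutex_unlock(&lock);

	/* Work that must run unlocked (cancelling timers/workers) goes here. */
	return 0;

err:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("%d\n", do_suspend(1));	/* -EIO */
	printf("%d\n", do_suspend(0));	/* 0 */
	printf("%d\n", do_suspend(0));	/* 0, already suspended */
	return 0;
}
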
@@ -4541,26 +4561,12 @@ int | |||
4541 | i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, | 4561 | i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, |
4542 | struct drm_file *file_priv) | 4562 | struct drm_file *file_priv) |
4543 | { | 4563 | { |
4544 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
4545 | int ret; | ||
4546 | |||
4547 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 4564 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
4548 | return 0; | 4565 | return 0; |
4549 | 4566 | ||
4550 | drm_irq_uninstall(dev); | 4567 | drm_irq_uninstall(dev); |
4551 | 4568 | ||
4552 | mutex_lock(&dev->struct_mutex); | 4569 | return i915_gem_suspend(dev); |
4553 | ret = i915_gem_idle(dev); | ||
4554 | |||
4555 | /* Hack! Don't let anybody do execbuf while we don't control the chip. | ||
4556 | * We need to replace this with a semaphore, or something. | ||
4557 | * And not confound ums.mm_suspended! | ||
4558 | */ | ||
4559 | if (ret != 0) | ||
4560 | dev_priv->ums.mm_suspended = 1; | ||
4561 | mutex_unlock(&dev->struct_mutex); | ||
4562 | |||
4563 | return ret; | ||
4564 | } | 4570 | } |
4565 | 4571 | ||
4566 | void | 4572 | void |
@@ -4571,11 +4577,9 @@ i915_gem_lastclose(struct drm_device *dev) | |||
4571 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 4577 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
4572 | return; | 4578 | return; |
4573 | 4579 | ||
4574 | mutex_lock(&dev->struct_mutex); | 4580 | ret = i915_gem_suspend(dev); |
4575 | ret = i915_gem_idle(dev); | ||
4576 | if (ret) | 4581 | if (ret) |
4577 | DRM_ERROR("failed to idle hardware: %d\n", ret); | 4582 | DRM_ERROR("failed to idle hardware: %d\n", ret); |
4578 | mutex_unlock(&dev->struct_mutex); | ||
4579 | } | 4583 | } |
4580 | 4584 | ||
4581 | static void | 4585 | static void |
@@ -4947,6 +4951,7 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc) | |||
4947 | 4951 | ||
4948 | if (unlock) | 4952 | if (unlock) |
4949 | mutex_unlock(&dev->struct_mutex); | 4953 | mutex_unlock(&dev->struct_mutex); |
4954 | |||
4950 | return count; | 4955 | return count; |
4951 | } | 4956 | } |
4952 | 4957 | ||
@@ -5018,7 +5023,6 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc) | |||
5018 | struct drm_i915_private, | 5023 | struct drm_i915_private, |
5019 | mm.inactive_shrinker); | 5024 | mm.inactive_shrinker); |
5020 | struct drm_device *dev = dev_priv->dev; | 5025 | struct drm_device *dev = dev_priv->dev; |
5021 | int nr_to_scan = sc->nr_to_scan; | ||
5022 | unsigned long freed; | 5026 | unsigned long freed; |
5023 | bool unlock = true; | 5027 | bool unlock = true; |
5024 | 5028 | ||
@@ -5032,15 +5036,17 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc) | |||
5032 | unlock = false; | 5036 | unlock = false; |
5033 | } | 5037 | } |
5034 | 5038 | ||
5035 | freed = i915_gem_purge(dev_priv, nr_to_scan); | 5039 | freed = i915_gem_purge(dev_priv, sc->nr_to_scan); |
5036 | if (freed < nr_to_scan) | 5040 | if (freed < sc->nr_to_scan) |
5037 | freed += __i915_gem_shrink(dev_priv, nr_to_scan, | 5041 | freed += __i915_gem_shrink(dev_priv, |
5038 | false); | 5042 | sc->nr_to_scan - freed, |
5039 | if (freed < nr_to_scan) | 5043 | false); |
5044 | if (freed < sc->nr_to_scan) | ||
5040 | freed += i915_gem_shrink_all(dev_priv); | 5045 | freed += i915_gem_shrink_all(dev_priv); |
5041 | 5046 | ||
5042 | if (unlock) | 5047 | if (unlock) |
5043 | mutex_unlock(&dev->struct_mutex); | 5048 | mutex_unlock(&dev->struct_mutex); |
5049 | |||
5044 | return freed; | 5050 | return freed; |
5045 | } | 5051 | } |
5046 | 5052 | ||
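
The scan hook above now works directly from sc->nr_to_scan and returns an unsigned long count of freed pages, trying the cheap (purgeable) pool first and only falling back to heavier passes for the remainder of the target. A stand-alone sketch of that staged-shrink logic, with invented pool sizes:

#include <stdio.h>

/* Three progressively more aggressive reclaim passes, like the i915 scan. */
static unsigned long purge_purgeable(unsigned long target)
{
	static unsigned long purgeable = 50;	/* invented pool sizes */
	unsigned long freed = target < purgeable ? target : purgeable;

	purgeable -= freed;
	return freed;
}

static unsigned long shrink_unpinned(unsigned long target)
{
	static unsigned long unpinned = 30;
	unsigned long freed = target < unpinned ? target : unpinned;

	unpinned -= freed;
	return freed;
}

static unsigned long shrink_all(void)
{
	static unsigned long rest = 100;
	unsigned long freed = rest;

	rest = 0;
	return freed;
}

static unsigned long scan(unsigned long nr_to_scan)
{
	unsigned long freed;

	freed = purge_purgeable(nr_to_scan);
	if (freed < nr_to_scan)
		freed += shrink_unpinned(nr_to_scan - freed);	/* only the remainder */
	if (freed < nr_to_scan)
		freed += shrink_all();

	return freed;
}

int main(void)
{
	printf("freed %lu of 120 requested\n", scan(120));
	return 0;
}
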
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 1a877a547290..cc619c138777 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
@@ -220,7 +220,6 @@ static int create_default_context(struct drm_i915_private *dev_priv) | |||
220 | * may not be available. To avoid this we always pin the | 220 | * may not be available. To avoid this we always pin the |
221 | * default context. | 221 | * default context. |
222 | */ | 222 | */ |
223 | dev_priv->ring[RCS].default_context = ctx; | ||
224 | ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false); | 223 | ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false); |
225 | if (ret) { | 224 | if (ret) { |
226 | DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); | 225 | DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); |
@@ -233,6 +232,8 @@ static int create_default_context(struct drm_i915_private *dev_priv) | |||
233 | goto err_unpin; | 232 | goto err_unpin; |
234 | } | 233 | } |
235 | 234 | ||
235 | dev_priv->ring[RCS].default_context = ctx; | ||
236 | |||
236 | DRM_DEBUG_DRIVER("Default HW context loaded\n"); | 237 | DRM_DEBUG_DRIVER("Default HW context loaded\n"); |
237 | return 0; | 238 | return 0; |
238 | 239 | ||
@@ -288,16 +289,24 @@ void i915_gem_context_fini(struct drm_device *dev) | |||
288 | * other code, leading to spurious errors. */ | 289 | * other code, leading to spurious errors. */ |
289 | intel_gpu_reset(dev); | 290 | intel_gpu_reset(dev); |
290 | 291 | ||
291 | i915_gem_object_unpin(dctx->obj); | ||
292 | |||
293 | /* When default context is created and switched to, base object refcount | 292 | /* When default context is created and switched to, base object refcount |
294 | * will be 2 (+1 from object creation and +1 from do_switch()). | 293 | * will be 2 (+1 from object creation and +1 from do_switch()). |
295 | * i915_gem_context_fini() will be called after gpu_idle() has switched | 294 | * i915_gem_context_fini() will be called after gpu_idle() has switched |
296 | * to default context. So we need to unreference the base object once | 295 | * to default context. So we need to unreference the base object once |
297 | * to offset the do_switch part, so that i915_gem_context_unreference() | 296 | * to offset the do_switch part, so that i915_gem_context_unreference() |
298 | * can then free the base object correctly. */ | 297 | * can then free the base object correctly. */ |
299 | drm_gem_object_unreference(&dctx->obj->base); | 298 | WARN_ON(!dev_priv->ring[RCS].last_context); |
299 | if (dev_priv->ring[RCS].last_context == dctx) { | ||
300 | /* Fake switch to NULL context */ | ||
301 | WARN_ON(dctx->obj->active); | ||
302 | i915_gem_object_unpin(dctx->obj); | ||
303 | i915_gem_context_unreference(dctx); | ||
304 | } | ||
305 | |||
306 | i915_gem_object_unpin(dctx->obj); | ||
300 | i915_gem_context_unreference(dctx); | 307 | i915_gem_context_unreference(dctx); |
308 | dev_priv->ring[RCS].default_context = NULL; | ||
309 | dev_priv->ring[RCS].last_context = NULL; | ||
301 | } | 310 | } |
302 | 311 | ||
303 | static int context_idr_cleanup(int id, void *p, void *data) | 312 | static int context_idr_cleanup(int id, void *p, void *data) |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index ac9ebe98f8b0..b13905348048 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -308,7 +308,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
308 | return -EINVAL; | 308 | return -EINVAL; |
309 | } | 309 | } |
310 | 310 | ||
311 | if (obj->pin_count) { | 311 | if (obj->pin_count || obj->framebuffer_references) { |
312 | drm_gem_object_unreference_unlocked(&obj->base); | 312 | drm_gem_object_unreference_unlocked(&obj->base); |
313 | return -EBUSY; | 313 | return -EBUSY; |
314 | } | 314 | } |
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 915c8ca08969..5dde81026471 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c | |||
@@ -910,8 +910,12 @@ void i915_capture_error_state(struct drm_device *dev) | |||
910 | return; | 910 | return; |
911 | } | 911 | } |
912 | 912 | ||
913 | DRM_INFO("capturing error event; look for more information in " | 913 | DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", |
914 | "/sys/class/drm/card%d/error\n", dev->primary->index); | 914 | dev->primary->index); |
915 | DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n"); | ||
916 | DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); | ||
917 | DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); | ||
918 | DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); | ||
915 | 919 | ||
916 | kref_init(&error->ref); | 920 | kref_init(&error->ref); |
917 | error->eir = I915_READ(EIR); | 921 | error->eir = I915_READ(EIR); |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index d1739d3bdae9..eb67bd98bc42 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -30,6 +30,7 @@ | |||
30 | 30 | ||
31 | #include <linux/sysrq.h> | 31 | #include <linux/sysrq.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/circ_buf.h> | ||
33 | #include <drm/drmP.h> | 34 | #include <drm/drmP.h> |
34 | #include <drm/i915_drm.h> | 35 | #include <drm/i915_drm.h> |
35 | #include "i915_drv.h" | 36 | #include "i915_drv.h" |
@@ -518,6 +519,12 @@ i915_pipe_enabled(struct drm_device *dev, int pipe) | |||
518 | } | 519 | } |
519 | } | 520 | } |
520 | 521 | ||
522 | static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe) | ||
523 | { | ||
524 | /* Gen2 doesn't have a hardware frame counter */ | ||
525 | return 0; | ||
526 | } | ||
527 | |||
521 | /* Called from drm generic code, passed a 'crtc', which | 528 | /* Called from drm generic code, passed a 'crtc', which |
522 | * we use as a pipe index | 529 | * we use as a pipe index |
523 | */ | 530 | */ |
@@ -526,7 +533,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) | |||
526 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 533 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
527 | unsigned long high_frame; | 534 | unsigned long high_frame; |
528 | unsigned long low_frame; | 535 | unsigned long low_frame; |
529 | u32 high1, high2, low; | 536 | u32 high1, high2, low, pixel, vbl_start; |
530 | 537 | ||
531 | if (!i915_pipe_enabled(dev, pipe)) { | 538 | if (!i915_pipe_enabled(dev, pipe)) { |
532 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " | 539 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " |
@@ -534,6 +541,24 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) | |||
534 | return 0; | 541 | return 0; |
535 | } | 542 | } |
536 | 543 | ||
544 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | ||
545 | struct intel_crtc *intel_crtc = | ||
546 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); | ||
547 | const struct drm_display_mode *mode = | ||
548 | &intel_crtc->config.adjusted_mode; | ||
549 | |||
550 | vbl_start = mode->crtc_vblank_start * mode->crtc_htotal; | ||
551 | } else { | ||
552 | enum transcoder cpu_transcoder = | ||
553 | intel_pipe_to_cpu_transcoder(dev_priv, pipe); | ||
554 | u32 htotal; | ||
555 | |||
556 | htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1; | ||
557 | vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1; | ||
558 | |||
559 | vbl_start *= htotal; | ||
560 | } | ||
561 | |||
537 | high_frame = PIPEFRAME(pipe); | 562 | high_frame = PIPEFRAME(pipe); |
538 | low_frame = PIPEFRAMEPIXEL(pipe); | 563 | low_frame = PIPEFRAMEPIXEL(pipe); |
539 | 564 | ||
@@ -544,13 +569,20 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) | |||
544 | */ | 569 | */ |
545 | do { | 570 | do { |
546 | high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; | 571 | high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; |
547 | low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; | 572 | low = I915_READ(low_frame); |
548 | high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; | 573 | high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; |
549 | } while (high1 != high2); | 574 | } while (high1 != high2); |
550 | 575 | ||
551 | high1 >>= PIPE_FRAME_HIGH_SHIFT; | 576 | high1 >>= PIPE_FRAME_HIGH_SHIFT; |
577 | pixel = low & PIPE_PIXEL_MASK; | ||
552 | low >>= PIPE_FRAME_LOW_SHIFT; | 578 | low >>= PIPE_FRAME_LOW_SHIFT; |
553 | return (high1 << 8) | low; | 579 | |
580 | /* | ||
581 | * The frame counter increments at beginning of active. | ||
582 | * Cook up a vblank counter by also checking the pixel | ||
583 | * counter against vblank start. | ||
584 | */ | ||
585 | return ((high1 << 8) | low) + (pixel >= vbl_start); | ||
554 | } | 586 | } |
555 | 587 | ||
556 | static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) | 588 | static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) |
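
The reworked counter above converts crtc_vblank_start into a pixel count and bumps the hardware frame counter by one once the pixel counter has reached vblank start, so the returned value behaves like a real vblank counter. A small arithmetic sketch of that correction, with made-up display timings:

#include <stdio.h>
#include <stdint.h>

/* Cooked vblank counter: hw frame counter + 1 once we're past vblank start. */
static uint32_t cook_vblank_count(uint32_t hw_frame, uint32_t pixel,
				  uint32_t crtc_vblank_start, uint32_t crtc_htotal)
{
	uint32_t vbl_start = crtc_vblank_start * crtc_htotal;	/* in pixels */

	return hw_frame + (pixel >= vbl_start);
}

int main(void)
{
	/* Invented mode: vblank starts at line 1084, 2200 pixels per line. */
	uint32_t vbl_start_line = 1084, htotal = 2200;

	/* Mid-frame: still counting the current frame. */
	printf("%u\n", cook_vblank_count(41, 500 * htotal,
					 vbl_start_line, htotal));	/* 41 */
	/* Inside vblank: report the next frame already. */
	printf("%u\n", cook_vblank_count(41, 1100 * htotal,
					 vbl_start_line, htotal));	/* 42 */
	return 0;
}
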
@@ -567,37 +599,98 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) | |||
567 | return I915_READ(reg); | 599 | return I915_READ(reg); |
568 | } | 600 | } |
569 | 601 | ||
602 | static bool intel_pipe_in_vblank(struct drm_device *dev, enum pipe pipe) | ||
603 | { | ||
604 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
605 | uint32_t status; | ||
606 | |||
607 | if (IS_VALLEYVIEW(dev)) { | ||
608 | status = pipe == PIPE_A ? | ||
609 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT : | ||
610 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | ||
611 | |||
612 | return I915_READ(VLV_ISR) & status; | ||
613 | } else if (IS_GEN2(dev)) { | ||
614 | status = pipe == PIPE_A ? | ||
615 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT : | ||
616 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | ||
617 | |||
618 | return I915_READ16(ISR) & status; | ||
619 | } else if (INTEL_INFO(dev)->gen < 5) { | ||
620 | status = pipe == PIPE_A ? | ||
621 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT : | ||
622 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | ||
623 | |||
624 | return I915_READ(ISR) & status; | ||
625 | } else if (INTEL_INFO(dev)->gen < 7) { | ||
626 | status = pipe == PIPE_A ? | ||
627 | DE_PIPEA_VBLANK : | ||
628 | DE_PIPEB_VBLANK; | ||
629 | |||
630 | return I915_READ(DEISR) & status; | ||
631 | } else { | ||
632 | switch (pipe) { | ||
633 | default: | ||
634 | case PIPE_A: | ||
635 | status = DE_PIPEA_VBLANK_IVB; | ||
636 | break; | ||
637 | case PIPE_B: | ||
638 | status = DE_PIPEB_VBLANK_IVB; | ||
639 | break; | ||
640 | case PIPE_C: | ||
641 | status = DE_PIPEC_VBLANK_IVB; | ||
642 | break; | ||
643 | } | ||
644 | |||
645 | return I915_READ(DEISR) & status; | ||
646 | } | ||
647 | } | ||
648 | |||
570 | static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | 649 | static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, |
571 | int *vpos, int *hpos) | 650 | int *vpos, int *hpos) |
572 | { | 651 | { |
573 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 652 | struct drm_i915_private *dev_priv = dev->dev_private; |
574 | u32 vbl = 0, position = 0; | 653 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
654 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
655 | const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode; | ||
656 | int position; | ||
575 | int vbl_start, vbl_end, htotal, vtotal; | 657 | int vbl_start, vbl_end, htotal, vtotal; |
576 | bool in_vbl = true; | 658 | bool in_vbl = true; |
577 | int ret = 0; | 659 | int ret = 0; |
578 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | ||
579 | pipe); | ||
580 | 660 | ||
581 | if (!i915_pipe_enabled(dev, pipe)) { | 661 | if (!intel_crtc->active) { |
582 | DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " | 662 | DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " |
583 | "pipe %c\n", pipe_name(pipe)); | 663 | "pipe %c\n", pipe_name(pipe)); |
584 | return 0; | 664 | return 0; |
585 | } | 665 | } |
586 | 666 | ||
587 | /* Get vtotal. */ | 667 | htotal = mode->crtc_htotal; |
588 | vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); | 668 | vtotal = mode->crtc_vtotal; |
669 | vbl_start = mode->crtc_vblank_start; | ||
670 | vbl_end = mode->crtc_vblank_end; | ||
589 | 671 | ||
590 | if (INTEL_INFO(dev)->gen >= 4) { | 672 | ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; |
673 | |||
674 | if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { | ||
591 | /* No obvious pixelcount register. Only query vertical | 675 | /* No obvious pixelcount register. Only query vertical |
592 | * scanout position from Display scan line register. | 676 | * scanout position from Display scan line register. |
593 | */ | 677 | */ |
594 | position = I915_READ(PIPEDSL(pipe)); | 678 | if (IS_GEN2(dev)) |
679 | position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; | ||
680 | else | ||
681 | position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; | ||
595 | 682 | ||
596 | /* Decode into vertical scanout position. Don't have | 683 | /* |
597 | * horizontal scanout position. | 684 | * The scanline counter increments at the leading edge |
685 | * of hsync, ie. it completely misses the active portion | ||
686 | * of the line. Fix up the counter at both edges of vblank | ||
687 | * to get a more accurate picture whether we're in vblank | ||
688 | * or not. | ||
598 | */ | 689 | */ |
599 | *vpos = position & 0x1fff; | 690 | in_vbl = intel_pipe_in_vblank(dev, pipe); |
600 | *hpos = 0; | 691 | if ((in_vbl && position == vbl_start - 1) || |
692 | (!in_vbl && position == vbl_end - 1)) | ||
693 | position = (position + 1) % vtotal; | ||
601 | } else { | 694 | } else { |
602 | /* Have access to pixelcount since start of frame. | 695 | /* Have access to pixelcount since start of frame. |
603 | * We can split this into vertical and horizontal | 696 | * We can split this into vertical and horizontal |
@@ -605,28 +698,32 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | |||
605 | */ | 698 | */ |
606 | position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; | 699 | position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; |
607 | 700 | ||
608 | htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); | 701 | /* convert to pixel counts */ |
609 | *vpos = position / htotal; | 702 | vbl_start *= htotal; |
610 | *hpos = position - (*vpos * htotal); | 703 | vbl_end *= htotal; |
704 | vtotal *= htotal; | ||
611 | } | 705 | } |
612 | 706 | ||
613 | /* Query vblank area. */ | 707 | in_vbl = position >= vbl_start && position < vbl_end; |
614 | vbl = I915_READ(VBLANK(cpu_transcoder)); | ||
615 | 708 | ||
616 | /* Test position against vblank region. */ | 709 | /* |
617 | vbl_start = vbl & 0x1fff; | 710 | * While in vblank, position will be negative |
618 | vbl_end = (vbl >> 16) & 0x1fff; | 711 | * counting up towards 0 at vbl_end. And outside |
619 | 712 | * vblank, position will be positive counting | |
620 | if ((*vpos < vbl_start) || (*vpos > vbl_end)) | 713 | * up since vbl_end. |
621 | in_vbl = false; | 714 | */ |
622 | 715 | if (position >= vbl_start) | |
623 | /* Inside "upper part" of vblank area? Apply corrective offset: */ | 716 | position -= vbl_end; |
624 | if (in_vbl && (*vpos >= vbl_start)) | 717 | else |
625 | *vpos = *vpos - vtotal; | 718 | position += vtotal - vbl_end; |
626 | 719 | ||
627 | /* Readouts valid? */ | 720 | if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { |
628 | if (vbl > 0) | 721 | *vpos = position; |
629 | ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; | 722 | *hpos = 0; |
723 | } else { | ||
724 | *vpos = position / htotal; | ||
725 | *hpos = position - (*vpos * htotal); | ||
726 | } | ||
630 | 727 | ||
631 | /* In vblank? */ | 728 | /* In vblank? */ |
632 | if (in_vbl) | 729 | if (in_vbl) |
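
After the rewrite above, the scanout position is reported relative to vblank: negative while inside vblank (counting up towards 0 at crtc_vblank_end) and positive afterwards. A stand-alone sketch of that normalisation, using invented line numbers:

#include <stdio.h>

/*
 * Map a raw scanline (0..vtotal-1) to a position relative to vblank:
 * negative inside vblank, reaching 0 at vbl_end; positive after vbl_end.
 */
static int normalise_scanline(int position, int vbl_start, int vbl_end, int vtotal)
{
	if (position >= vbl_start)
		return position - vbl_end;
	return position + vtotal - vbl_end;
}

int main(void)
{
	/* Invented timings: 1080 active lines, vblank 1084..1125, vtotal 1125. */
	int vbl_start = 1084, vbl_end = 1125, vtotal = 1125;

	printf("%d\n", normalise_scanline(1090, vbl_start, vbl_end, vtotal)); /* -35, in vblank */
	printf("%d\n", normalise_scanline(100,  vbl_start, vbl_end, vtotal)); /* 100, scanning out */
	printf("%d\n", normalise_scanline(1084, vbl_start, vbl_end, vtotal)); /* -41, vblank just began */
	return 0;
}
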
@@ -1092,6 +1189,83 @@ static void dp_aux_irq_handler(struct drm_device *dev) | |||
1092 | wake_up_all(&dev_priv->gmbus_wait_queue); | 1189 | wake_up_all(&dev_priv->gmbus_wait_queue); |
1093 | } | 1190 | } |
1094 | 1191 | ||
1192 | #if defined(CONFIG_DEBUG_FS) | ||
1193 | static void display_pipe_crc_update(struct drm_device *dev, enum pipe pipe, | ||
1194 | uint32_t crc0, uint32_t crc1, | ||
1195 | uint32_t crc2, uint32_t crc3, | ||
1196 | uint32_t crc4) | ||
1197 | { | ||
1198 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1199 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; | ||
1200 | struct intel_pipe_crc_entry *entry; | ||
1201 | int head, tail; | ||
1202 | |||
1203 | if (!pipe_crc->entries) { | ||
1204 | DRM_ERROR("spurious interrupt\n"); | ||
1205 | return; | ||
1206 | } | ||
1207 | |||
1208 | head = atomic_read(&pipe_crc->head); | ||
1209 | tail = atomic_read(&pipe_crc->tail); | ||
1210 | |||
1211 | if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { | ||
1212 | DRM_ERROR("CRC buffer overflowing\n"); | ||
1213 | return; | ||
1214 | } | ||
1215 | |||
1216 | entry = &pipe_crc->entries[head]; | ||
1217 | |||
1218 | entry->frame = dev->driver->get_vblank_counter(dev, pipe); | ||
1219 | entry->crc[0] = crc0; | ||
1220 | entry->crc[1] = crc1; | ||
1221 | entry->crc[2] = crc2; | ||
1222 | entry->crc[3] = crc3; | ||
1223 | entry->crc[4] = crc4; | ||
1224 | |||
1225 | head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); | ||
1226 | atomic_set(&pipe_crc->head, head); | ||
1227 | |||
1228 | wake_up_interruptible(&pipe_crc->wq); | ||
1229 | } | ||
1230 | |||
1231 | static void hsw_pipe_crc_update(struct drm_device *dev, enum pipe pipe) | ||
1232 | { | ||
1233 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1234 | |||
1235 | display_pipe_crc_update(dev, pipe, | ||
1236 | I915_READ(PIPE_CRC_RES_1_IVB(pipe)), | ||
1237 | 0, 0, 0, 0); | ||
1238 | } | ||
1239 | |||
1240 | static void ivb_pipe_crc_update(struct drm_device *dev, enum pipe pipe) | ||
1241 | { | ||
1242 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1243 | |||
1244 | display_pipe_crc_update(dev, pipe, | ||
1245 | I915_READ(PIPE_CRC_RES_1_IVB(pipe)), | ||
1246 | I915_READ(PIPE_CRC_RES_2_IVB(pipe)), | ||
1247 | I915_READ(PIPE_CRC_RES_3_IVB(pipe)), | ||
1248 | I915_READ(PIPE_CRC_RES_4_IVB(pipe)), | ||
1249 | I915_READ(PIPE_CRC_RES_5_IVB(pipe))); | ||
1250 | } | ||
1251 | |||
1252 | static void ilk_pipe_crc_update(struct drm_device *dev, enum pipe pipe) | ||
1253 | { | ||
1254 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1255 | |||
1256 | display_pipe_crc_update(dev, pipe, | ||
1257 | I915_READ(PIPE_CRC_RES_RED_ILK(pipe)), | ||
1258 | I915_READ(PIPE_CRC_RES_GREEN_ILK(pipe)), | ||
1259 | I915_READ(PIPE_CRC_RES_BLUE_ILK(pipe)), | ||
1260 | I915_READ(PIPE_CRC_RES_RES1_ILK(pipe)), | ||
1261 | I915_READ(PIPE_CRC_RES_RES2_ILK(pipe))); | ||
1262 | } | ||
1263 | #else | ||
1264 | static inline void hsw_pipe_crc_update(struct drm_device *dev, int pipe) {} | ||
1265 | static inline void ivb_pipe_crc_update(struct drm_device *dev, int pipe) {} | ||
1266 | static inline void ilk_pipe_crc_update(struct drm_device *dev, int pipe) {} | ||
1267 | #endif | ||
1268 | |||
1095 | /* The RPS events need forcewake, so we add them to a work queue and mask their | 1269 | /* The RPS events need forcewake, so we add them to a work queue and mask their |
1096 | * IMR bits until the work is done. Other interrupts can be processed without | 1270 | * IMR bits until the work is done. Other interrupts can be processed without |
1097 | * the work queue. */ | 1271 | * the work queue. */ |
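
display_pipe_crc_update() above is a single-producer ring fed from the interrupt handler; it relies on the circ_buf helpers and on the entry count being a power of two so that the (NR - 1) mask wraps the index. A standalone sketch of the same producer logic, with CIRC_CNT/CIRC_SPACE expanded as in include/linux/circ_buf.h and the ring sized 128 purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* same helpers as include/linux/circ_buf.h */
#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

#define ENTRIES_NR 128 /* must be a power of two; 128 is an assumption for the example */

struct crc_entry {
        uint32_t frame;
        uint32_t crc[5];
};

static struct crc_entry entries[ENTRIES_NR];
static int head, tail;

/* producer side: one entry per CRC interrupt; drop the sample if the reader lags */
static int push_crc(uint32_t frame, const uint32_t crc[5])
{
        if (CIRC_SPACE(head, tail, ENTRIES_NR) < 1)
                return -1; /* overflow: the debugfs consumer is too slow */

        entries[head].frame = frame;
        for (int i = 0; i < 5; i++)
                entries[head].crc[i] = crc[i];

        head = (head + 1) & (ENTRIES_NR - 1); /* power-of-two wrap */
        return 0;
}

int main(void)
{
        uint32_t crc[5] = { 0xdeadbeef, 0, 0, 0, 0 };

        for (uint32_t frame = 0; frame < 3; frame++)
                push_crc(frame, crc);

        printf("queued %d entries\n", CIRC_CNT(head, tail, ENTRIES_NR));
        return 0;
}
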
@@ -1254,21 +1428,26 @@ static void ivb_err_int_handler(struct drm_device *dev) | |||
1254 | { | 1428 | { |
1255 | struct drm_i915_private *dev_priv = dev->dev_private; | 1429 | struct drm_i915_private *dev_priv = dev->dev_private; |
1256 | u32 err_int = I915_READ(GEN7_ERR_INT); | 1430 | u32 err_int = I915_READ(GEN7_ERR_INT); |
1431 | enum pipe pipe; | ||
1257 | 1432 | ||
1258 | if (err_int & ERR_INT_POISON) | 1433 | if (err_int & ERR_INT_POISON) |
1259 | DRM_ERROR("Poison interrupt\n"); | 1434 | DRM_ERROR("Poison interrupt\n"); |
1260 | 1435 | ||
1261 | if (err_int & ERR_INT_FIFO_UNDERRUN_A) | 1436 | for_each_pipe(pipe) { |
1262 | if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) | 1437 | if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { |
1263 | DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); | 1438 | if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, |
1264 | 1439 | false)) | |
1265 | if (err_int & ERR_INT_FIFO_UNDERRUN_B) | 1440 | DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", |
1266 | if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) | 1441 | pipe_name(pipe)); |
1267 | DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); | 1442 | } |
1268 | 1443 | ||
1269 | if (err_int & ERR_INT_FIFO_UNDERRUN_C) | 1444 | if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { |
1270 | if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false)) | 1445 | if (IS_IVYBRIDGE(dev)) |
1271 | DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n"); | 1446 | ivb_pipe_crc_update(dev, pipe); |
1447 | else | ||
1448 | hsw_pipe_crc_update(dev, pipe); | ||
1449 | } | ||
1450 | } | ||
1272 | 1451 | ||
1273 | I915_WRITE(GEN7_ERR_INT, err_int); | 1452 | I915_WRITE(GEN7_ERR_INT, err_int); |
1274 | } | 1453 | } |
@@ -1363,6 +1542,12 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) | |||
1363 | if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) | 1542 | if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) |
1364 | DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); | 1543 | DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); |
1365 | 1544 | ||
1545 | if (de_iir & DE_PIPEA_CRC_DONE) | ||
1546 | ilk_pipe_crc_update(dev, PIPE_A); | ||
1547 | |||
1548 | if (de_iir & DE_PIPEB_CRC_DONE) | ||
1549 | ilk_pipe_crc_update(dev, PIPE_B); | ||
1550 | |||
1366 | if (de_iir & DE_PLANEA_FLIP_DONE) { | 1551 | if (de_iir & DE_PLANEA_FLIP_DONE) { |
1367 | intel_prepare_page_flip(dev, 0); | 1552 | intel_prepare_page_flip(dev, 0); |
1368 | intel_finish_page_flip_plane(dev, 0); | 1553 | intel_finish_page_flip_plane(dev, 0); |
@@ -1988,6 +2173,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd) | |||
1988 | if (tmp & RING_WAIT) { | 2173 | if (tmp & RING_WAIT) { |
1989 | DRM_ERROR("Kicking stuck wait on %s\n", | 2174 | DRM_ERROR("Kicking stuck wait on %s\n", |
1990 | ring->name); | 2175 | ring->name); |
2176 | i915_handle_error(dev, false); | ||
1991 | I915_WRITE_CTL(ring, tmp); | 2177 | I915_WRITE_CTL(ring, tmp); |
1992 | return HANGCHECK_KICK; | 2178 | return HANGCHECK_KICK; |
1993 | } | 2179 | } |
@@ -1999,6 +2185,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd) | |||
1999 | case 1: | 2185 | case 1: |
2000 | DRM_ERROR("Kicking stuck semaphore on %s\n", | 2186 | DRM_ERROR("Kicking stuck semaphore on %s\n", |
2001 | ring->name); | 2187 | ring->name); |
2188 | i915_handle_error(dev, false); | ||
2002 | I915_WRITE_CTL(ring, tmp); | 2189 | I915_WRITE_CTL(ring, tmp); |
2003 | return HANGCHECK_KICK; | 2190 | return HANGCHECK_KICK; |
2004 | case 0: | 2191 | case 0: |
@@ -2337,8 +2524,10 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
2337 | } else { | 2524 | } else { |
2338 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | 2525 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
2339 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | | 2526 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | |
2340 | DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | | 2527 | DE_AUX_CHANNEL_A | |
2341 | DE_PIPEA_FIFO_UNDERRUN | DE_POISON); | 2528 | DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | |
2529 | DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | | ||
2530 | DE_POISON); | ||
2342 | extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; | 2531 | extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; |
2343 | } | 2532 | } |
2344 | 2533 | ||
@@ -3153,18 +3342,21 @@ void intel_irq_init(struct drm_device *dev) | |||
3153 | 3342 | ||
3154 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); | 3343 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); |
3155 | 3344 | ||
3156 | dev->driver->get_vblank_counter = i915_get_vblank_counter; | 3345 | if (IS_GEN2(dev)) { |
3157 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | 3346 | dev->max_vblank_count = 0; |
3158 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { | 3347 | dev->driver->get_vblank_counter = i8xx_get_vblank_counter; |
3348 | } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { | ||
3159 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ | 3349 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ |
3160 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; | 3350 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; |
3351 | } else { | ||
3352 | dev->driver->get_vblank_counter = i915_get_vblank_counter; | ||
3353 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | ||
3161 | } | 3354 | } |
3162 | 3355 | ||
3163 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 3356 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
3164 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; | 3357 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; |
3165 | else | 3358 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; |
3166 | dev->driver->get_vblank_timestamp = NULL; | 3359 | } |
3167 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; | ||
3168 | 3360 | ||
3169 | if (IS_VALLEYVIEW(dev)) { | 3361 | if (IS_VALLEYVIEW(dev)) { |
3170 | dev->driver->irq_handler = valleyview_irq_handler; | 3362 | dev->driver->irq_handler = valleyview_irq_handler; |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 95385023e0ba..0e7488b64965 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -26,6 +26,7 @@ | |||
26 | #define _I915_REG_H_ | 26 | #define _I915_REG_H_ |
27 | 27 | ||
28 | #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) | 28 | #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) |
29 | #define _PIPE_INC(pipe, base, inc) ((base) + (pipe)*(inc)) | ||
29 | #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) | 30 | #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) |
30 | 31 | ||
31 | #define _PORT(port, a, b) ((a) + (port)*((b)-(a))) | 32 | #define _PORT(port, a, b) ((a) + (port)*((b)-(a))) |
@@ -722,8 +723,12 @@ | |||
722 | #define GEN7_ERR_INT 0x44040 | 723 | #define GEN7_ERR_INT 0x44040 |
723 | #define ERR_INT_POISON (1<<31) | 724 | #define ERR_INT_POISON (1<<31) |
724 | #define ERR_INT_MMIO_UNCLAIMED (1<<13) | 725 | #define ERR_INT_MMIO_UNCLAIMED (1<<13) |
726 | #define ERR_INT_PIPE_CRC_DONE_C (1<<8) | ||
725 | #define ERR_INT_FIFO_UNDERRUN_C (1<<6) | 727 | #define ERR_INT_FIFO_UNDERRUN_C (1<<6) |
728 | #define ERR_INT_PIPE_CRC_DONE_B (1<<5) | ||
726 | #define ERR_INT_FIFO_UNDERRUN_B (1<<3) | 729 | #define ERR_INT_FIFO_UNDERRUN_B (1<<3) |
730 | #define ERR_INT_PIPE_CRC_DONE_A (1<<2) | ||
731 | #define ERR_INT_PIPE_CRC_DONE(pipe) (1<<(2 + pipe*3)) | ||
727 | #define ERR_INT_FIFO_UNDERRUN_A (1<<0) | 732 | #define ERR_INT_FIFO_UNDERRUN_A (1<<0) |
728 | #define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) | 733 | #define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) |
729 | 734 | ||
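
The new ERR_INT_PIPE_CRC_DONE(pipe) macro depends on the per-pipe GEN7_ERR_INT bits having a stride of three, with the FIFO underrun bit at pipe*3 and the CRC-done bit two above it, matching the fixed _A/_B/_C definitions (bits 0/2, 3/5 and 6/8). A quick standalone check of that arithmetic:

#include <stdio.h>

/* same bit arithmetic as the GEN7_ERR_INT macros above */
#define ERR_INT_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
#define ERR_INT_PIPE_CRC_DONE(pipe) (1 << (2 + (pipe) * 3))

int main(void)
{
        for (int pipe = 0; pipe < 3; pipe++)
                printf("pipe %c: underrun=0x%03x crc_done=0x%03x\n",
                       'A' + pipe,
                       ERR_INT_FIFO_UNDERRUN(pipe),
                       ERR_INT_PIPE_CRC_DONE(pipe));
        return 0;
}
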
@@ -1835,6 +1840,61 @@ | |||
1835 | * Display engine regs | 1840 | * Display engine regs |
1836 | */ | 1841 | */ |
1837 | 1842 | ||
1843 | /* Pipe A CRC regs */ | ||
1844 | #define _PIPE_CRC_CTL_A (dev_priv->info->display_mmio_offset + 0x60050) | ||
1845 | #define PIPE_CRC_ENABLE (1 << 31) | ||
1846 | #define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29) | ||
1847 | #define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29) | ||
1848 | #define PIPE_CRC_SOURCE_PF_IVB (2 << 29) | ||
1849 | #define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28) | ||
1850 | #define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28) | ||
1851 | #define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28) | ||
1852 | /* embedded DP port on the north display block, reserved on ivb */ | ||
1853 | #define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28) | ||
1854 | #define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */ | ||
1855 | #define _PIPE_CRC_RES_1_A_IVB 0x60064 | ||
1856 | #define _PIPE_CRC_RES_2_A_IVB 0x60068 | ||
1857 | #define _PIPE_CRC_RES_3_A_IVB 0x6006c | ||
1858 | #define _PIPE_CRC_RES_4_A_IVB 0x60070 | ||
1859 | #define _PIPE_CRC_RES_5_A_IVB 0x60074 | ||
1860 | |||
1861 | #define _PIPE_CRC_RES_RED_A_ILK 0x60060 | ||
1862 | #define _PIPE_CRC_RES_GREEN_A_ILK 0x60064 | ||
1863 | #define _PIPE_CRC_RES_BLUE_A_ILK 0x60068 | ||
1864 | #define _PIPE_CRC_RES_RES1_A_ILK 0x6006c | ||
1865 | #define _PIPE_CRC_RES_RES2_A_ILK 0x60080 | ||
1866 | |||
1867 | /* Pipe B CRC regs */ | ||
1868 | #define _PIPE_CRC_CTL_B 0x61050 | ||
1869 | #define _PIPE_CRC_RES_1_B_IVB 0x61064 | ||
1870 | #define _PIPE_CRC_RES_2_B_IVB 0x61068 | ||
1871 | #define _PIPE_CRC_RES_3_B_IVB 0x6106c | ||
1872 | #define _PIPE_CRC_RES_4_B_IVB 0x61070 | ||
1873 | #define _PIPE_CRC_RES_5_B_IVB 0x61074 | ||
1874 | |||
1875 | #define PIPE_CRC_CTL(pipe) _PIPE(pipe, _PIPE_CRC_CTL_A, _PIPE_CRC_CTL_B) | ||
1876 | #define PIPE_CRC_RES_1_IVB(pipe) \ | ||
1877 | _PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB) | ||
1878 | #define PIPE_CRC_RES_2_IVB(pipe) \ | ||
1879 | _PIPE(pipe, _PIPE_CRC_RES_2_A_IVB, _PIPE_CRC_RES_2_B_IVB) | ||
1880 | #define PIPE_CRC_RES_3_IVB(pipe) \ | ||
1881 | _PIPE(pipe, _PIPE_CRC_RES_3_A_IVB, _PIPE_CRC_RES_3_B_IVB) | ||
1882 | #define PIPE_CRC_RES_4_IVB(pipe) \ | ||
1883 | _PIPE(pipe, _PIPE_CRC_RES_4_A_IVB, _PIPE_CRC_RES_4_B_IVB) | ||
1884 | #define PIPE_CRC_RES_5_IVB(pipe) \ | ||
1885 | _PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB) | ||
1886 | |||
1887 | #define PIPE_CRC_RES_RED_ILK(pipe) \ | ||
1888 | _PIPE_INC(pipe, _PIPE_CRC_RES_RED_A_ILK, 0x01000) | ||
1889 | #define PIPE_CRC_RES_GREEN_ILK(pipe) \ | ||
1890 | _PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A_ILK, 0x01000) | ||
1891 | #define PIPE_CRC_RES_BLUE_ILK(pipe) \ | ||
1892 | _PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A_ILK, 0x01000) | ||
1893 | #define PIPE_CRC_RES_RES1_ILK(pipe) \ | ||
1894 | _PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_ILK, 0x01000) | ||
1895 | #define PIPE_CRC_RES_RES2_ILK(pipe) \ | ||
1896 | _PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_ILK, 0x01000) | ||
1897 | |||
1838 | /* Pipe A timing regs */ | 1898 | /* Pipe A timing regs */ |
1839 | #define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000) | 1899 | #define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000) |
1840 | #define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004) | 1900 | #define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004) |
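
Two addressing schemes appear in this hunk: _PIPE(pipe, a, b) interpolates between the pipe A and pipe B register addresses, while the new _PIPE_INC(pipe, base, inc) adds an explicit per-pipe stride, which the ILK CRC result registers need because only the pipe A offsets are defined and the pipes sit 0x1000 apart. A small standalone check (display_mmio_offset left out to keep the example self-contained):

#include <stdio.h>
#include <stdint.h>

#define _PIPE(pipe, a, b)          ((a) + (pipe) * ((b) - (a)))
#define _PIPE_INC(pipe, base, inc) ((base) + (pipe) * (inc))

#define _PIPE_CRC_CTL_A         0x60050
#define _PIPE_CRC_CTL_B         0x61050
#define _PIPE_CRC_RES_RED_A_ILK 0x60060

int main(void)
{
        /* pipe B: both schemes land 0x1000 above the pipe A register */
        printf("PIPE_CRC_CTL(B)         = 0x%05x\n",
               (uint32_t)_PIPE(1, _PIPE_CRC_CTL_A, _PIPE_CRC_CTL_B));     /* 0x61050 */
        printf("PIPE_CRC_RES_RED_ILK(B) = 0x%05x\n",
               (uint32_t)_PIPE_INC(1, _PIPE_CRC_RES_RED_A_ILK, 0x01000)); /* 0x61060 */
        return 0;
}
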
@@ -1857,7 +1917,6 @@ | |||
1857 | #define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020) | 1917 | #define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020) |
1858 | #define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028) | 1918 | #define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028) |
1859 | 1919 | ||
1860 | |||
1861 | #define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B) | 1920 | #define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B) |
1862 | #define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B) | 1921 | #define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B) |
1863 | #define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B) | 1922 | #define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B) |
@@ -3251,11 +3310,11 @@ | |||
3251 | 3310 | ||
3252 | /* define the Watermark register on Ironlake */ | 3311 | /* define the Watermark register on Ironlake */ |
3253 | #define WM0_PIPEA_ILK 0x45100 | 3312 | #define WM0_PIPEA_ILK 0x45100 |
3254 | #define WM0_PIPE_PLANE_MASK (0x7f<<16) | 3313 | #define WM0_PIPE_PLANE_MASK (0xffff<<16) |
3255 | #define WM0_PIPE_PLANE_SHIFT 16 | 3314 | #define WM0_PIPE_PLANE_SHIFT 16 |
3256 | #define WM0_PIPE_SPRITE_MASK (0x3f<<8) | 3315 | #define WM0_PIPE_SPRITE_MASK (0xff<<8) |
3257 | #define WM0_PIPE_SPRITE_SHIFT 8 | 3316 | #define WM0_PIPE_SPRITE_SHIFT 8 |
3258 | #define WM0_PIPE_CURSOR_MASK (0x1f) | 3317 | #define WM0_PIPE_CURSOR_MASK (0xff) |
3259 | 3318 | ||
3260 | #define WM0_PIPEB_ILK 0x45104 | 3319 | #define WM0_PIPEB_ILK 0x45104 |
3261 | #define WM0_PIPEC_IVB 0x45200 | 3320 | #define WM0_PIPEC_IVB 0x45200 |
@@ -3265,9 +3324,9 @@ | |||
3265 | #define WM1_LP_LATENCY_MASK (0x7f<<24) | 3324 | #define WM1_LP_LATENCY_MASK (0x7f<<24) |
3266 | #define WM1_LP_FBC_MASK (0xf<<20) | 3325 | #define WM1_LP_FBC_MASK (0xf<<20) |
3267 | #define WM1_LP_FBC_SHIFT 20 | 3326 | #define WM1_LP_FBC_SHIFT 20 |
3268 | #define WM1_LP_SR_MASK (0x1ff<<8) | 3327 | #define WM1_LP_SR_MASK (0x7ff<<8) |
3269 | #define WM1_LP_SR_SHIFT 8 | 3328 | #define WM1_LP_SR_SHIFT 8 |
3270 | #define WM1_LP_CURSOR_MASK (0x3f) | 3329 | #define WM1_LP_CURSOR_MASK (0xff) |
3271 | #define WM2_LP_ILK 0x4510c | 3330 | #define WM2_LP_ILK 0x4510c |
3272 | #define WM2_LP_EN (1<<31) | 3331 | #define WM2_LP_EN (1<<31) |
3273 | #define WM3_LP_ILK 0x45110 | 3332 | #define WM3_LP_ILK 0x45110 |
@@ -3348,17 +3407,17 @@ | |||
3348 | * } while (high1 != high2); | 3407 | * } while (high1 != high2); |
3349 | * frame = (high1 << 8) | low1; | 3408 | * frame = (high1 << 8) | low1; |
3350 | */ | 3409 | */ |
3351 | #define _PIPEAFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x70040) | 3410 | #define _PIPEAFRAMEHIGH 0x70040 |
3352 | #define PIPE_FRAME_HIGH_MASK 0x0000ffff | 3411 | #define PIPE_FRAME_HIGH_MASK 0x0000ffff |
3353 | #define PIPE_FRAME_HIGH_SHIFT 0 | 3412 | #define PIPE_FRAME_HIGH_SHIFT 0 |
3354 | #define _PIPEAFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x70044) | 3413 | #define _PIPEAFRAMEPIXEL 0x70044 |
3355 | #define PIPE_FRAME_LOW_MASK 0xff000000 | 3414 | #define PIPE_FRAME_LOW_MASK 0xff000000 |
3356 | #define PIPE_FRAME_LOW_SHIFT 24 | 3415 | #define PIPE_FRAME_LOW_SHIFT 24 |
3357 | #define PIPE_PIXEL_MASK 0x00ffffff | 3416 | #define PIPE_PIXEL_MASK 0x00ffffff |
3358 | #define PIPE_PIXEL_SHIFT 0 | 3417 | #define PIPE_PIXEL_SHIFT 0 |
3359 | /* GM45+ just has to be different */ | 3418 | /* GM45+ just has to be different */ |
3360 | #define _PIPEA_FRMCOUNT_GM45 0x70040 | 3419 | #define _PIPEA_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70040) |
3361 | #define _PIPEA_FLIPCOUNT_GM45 0x70044 | 3420 | #define _PIPEA_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x70044) |
3362 | #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) | 3421 | #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) |
3363 | 3422 | ||
3364 | /* Cursor A & B regs */ | 3423 | /* Cursor A & B regs */ |
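
The comment quoted in the hunk header explains why the pre-G4X frame counter needs a retry loop: the upper 16 bits and lower 8 bits live in separate registers, so a carry between the two reads would mix counts from adjacent frames. An illustrative userspace model of that loop; the simulated counter below advances on every low read simply to force one retry, which is not how the hardware behaves:

#include <stdio.h>
#include <stdint.h>

#define PIPE_FRAME_HIGH_MASK 0x0000ffff
#define PIPE_FRAME_LOW_MASK  0xff000000
#define PIPE_FRAME_LOW_SHIFT 24

/* fake "registers": a 24-bit counter about to carry into the high bits */
static uint32_t hw_frame = 0x0001ff;

static uint32_t read_frame_high(void) { return (hw_frame >> 8) & PIPE_FRAME_HIGH_MASK; }
static uint32_t read_frame_low(void)  { hw_frame++; return (hw_frame & 0xff) << PIPE_FRAME_LOW_SHIFT; }

int main(void)
{
        uint32_t high1, high2, low;

        /* same structure as the loop described in the comment above */
        do {
                high1 = read_frame_high();
                low = (read_frame_low() & PIPE_FRAME_LOW_MASK) >> PIPE_FRAME_LOW_SHIFT;
                high2 = read_frame_high();
        } while (high1 != high2);

        printf("frame = 0x%06x\n", (high1 << 8) | low);
        return 0;
}
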
@@ -3489,10 +3548,10 @@ | |||
3489 | #define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000) | 3548 | #define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000) |
3490 | #define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008) | 3549 | #define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008) |
3491 | #define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024) | 3550 | #define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024) |
3492 | #define _PIPEBFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x71040) | 3551 | #define _PIPEBFRAMEHIGH 0x71040 |
3493 | #define _PIPEBFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x71044) | 3552 | #define _PIPEBFRAMEPIXEL 0x71044 |
3494 | #define _PIPEB_FRMCOUNT_GM45 0x71040 | 3553 | #define _PIPEB_FRMCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71040) |
3495 | #define _PIPEB_FLIPCOUNT_GM45 0x71044 | 3554 | #define _PIPEB_FLIPCOUNT_GM45 (dev_priv->info->display_mmio_offset + 0x71044) |
3496 | 3555 | ||
3497 | 3556 | ||
3498 | /* Display B control */ | 3557 | /* Display B control */ |
@@ -3860,12 +3919,14 @@ | |||
3860 | #define DE_PIPEB_ODD_FIELD (1 << 13) | 3919 | #define DE_PIPEB_ODD_FIELD (1 << 13) |
3861 | #define DE_PIPEB_LINE_COMPARE (1 << 12) | 3920 | #define DE_PIPEB_LINE_COMPARE (1 << 12) |
3862 | #define DE_PIPEB_VSYNC (1 << 11) | 3921 | #define DE_PIPEB_VSYNC (1 << 11) |
3922 | #define DE_PIPEB_CRC_DONE (1 << 10) | ||
3863 | #define DE_PIPEB_FIFO_UNDERRUN (1 << 8) | 3923 | #define DE_PIPEB_FIFO_UNDERRUN (1 << 8) |
3864 | #define DE_PIPEA_VBLANK (1 << 7) | 3924 | #define DE_PIPEA_VBLANK (1 << 7) |
3865 | #define DE_PIPEA_EVEN_FIELD (1 << 6) | 3925 | #define DE_PIPEA_EVEN_FIELD (1 << 6) |
3866 | #define DE_PIPEA_ODD_FIELD (1 << 5) | 3926 | #define DE_PIPEA_ODD_FIELD (1 << 5) |
3867 | #define DE_PIPEA_LINE_COMPARE (1 << 4) | 3927 | #define DE_PIPEA_LINE_COMPARE (1 << 4) |
3868 | #define DE_PIPEA_VSYNC (1 << 3) | 3928 | #define DE_PIPEA_VSYNC (1 << 3) |
3929 | #define DE_PIPEA_CRC_DONE (1 << 2) | ||
3869 | #define DE_PIPEA_FIFO_UNDERRUN (1 << 0) | 3930 | #define DE_PIPEA_FIFO_UNDERRUN (1 << 0) |
3870 | 3931 | ||
3871 | /* More Ivybridge lolz */ | 3932 | /* More Ivybridge lolz */ |
@@ -4867,7 +4928,17 @@ | |||
4867 | #define AUD_CONFIG_LOWER_N_SHIFT 4 | 4928 | #define AUD_CONFIG_LOWER_N_SHIFT 4 |
4868 | #define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4) | 4929 | #define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4) |
4869 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16 | 4930 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16 |
4870 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16) | 4931 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16) |
4932 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16) | ||
4933 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 (1 << 16) | ||
4934 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 (2 << 16) | ||
4935 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 (3 << 16) | ||
4936 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 (4 << 16) | ||
4937 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 (5 << 16) | ||
4938 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 (6 << 16) | ||
4939 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16) | ||
4940 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16) | ||
4941 | #define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16) | ||
4871 | #define AUD_CONFIG_DISABLE_NCTS (1 << 3) | 4942 | #define AUD_CONFIG_DISABLE_NCTS (1 << 3) |
4872 | 4943 | ||
4873 | /* HSW Audio */ | 4944 | /* HSW Audio */ |
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 3538370e3a47..a088f1f46bdb 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -369,7 +369,8 @@ int i915_save_state(struct drm_device *dev) | |||
369 | intel_disable_gt_powersave(dev); | 369 | intel_disable_gt_powersave(dev); |
370 | 370 | ||
371 | /* Cache mode state */ | 371 | /* Cache mode state */ |
372 | dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); | 372 | if (INTEL_INFO(dev)->gen < 7) |
373 | dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); | ||
373 | 374 | ||
374 | /* Memory Arbitration state */ | 375 | /* Memory Arbitration state */ |
375 | dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); | 376 | dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); |
@@ -418,7 +419,9 @@ int i915_restore_state(struct drm_device *dev) | |||
418 | } | 419 | } |
419 | 420 | ||
420 | /* Cache mode state */ | 421 | /* Cache mode state */ |
421 | I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000); | 422 | if (INTEL_INFO(dev)->gen < 7) |
423 | I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | | ||
424 | 0xffff0000); | ||
422 | 425 | ||
423 | /* Memory arbitration state */ | 426 | /* Memory arbitration state */ |
424 | I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000); | 427 | I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000); |
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 1beec51b8e26..cef38fd320a7 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c | |||
@@ -253,6 +253,8 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev, | |||
253 | struct drm_i915_private *dev_priv = dev->dev_private; | 253 | struct drm_i915_private *dev_priv = dev->dev_private; |
254 | int ret; | 254 | int ret; |
255 | 255 | ||
256 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); | ||
257 | |||
256 | mutex_lock(&dev_priv->rps.hw_lock); | 258 | mutex_lock(&dev_priv->rps.hw_lock); |
257 | if (IS_VALLEYVIEW(dev_priv->dev)) { | 259 | if (IS_VALLEYVIEW(dev_priv->dev)) { |
258 | u32 freq; | 260 | u32 freq; |
@@ -285,6 +287,8 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute | |||
285 | struct drm_i915_private *dev_priv = dev->dev_private; | 287 | struct drm_i915_private *dev_priv = dev->dev_private; |
286 | int ret; | 288 | int ret; |
287 | 289 | ||
290 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); | ||
291 | |||
288 | mutex_lock(&dev_priv->rps.hw_lock); | 292 | mutex_lock(&dev_priv->rps.hw_lock); |
289 | if (IS_VALLEYVIEW(dev_priv->dev)) | 293 | if (IS_VALLEYVIEW(dev_priv->dev)) |
290 | ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay); | 294 | ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay); |
@@ -309,6 +313,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, | |||
309 | if (ret) | 313 | if (ret) |
310 | return ret; | 314 | return ret; |
311 | 315 | ||
316 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); | ||
317 | |||
312 | mutex_lock(&dev_priv->rps.hw_lock); | 318 | mutex_lock(&dev_priv->rps.hw_lock); |
313 | 319 | ||
314 | if (IS_VALLEYVIEW(dev_priv->dev)) { | 320 | if (IS_VALLEYVIEW(dev_priv->dev)) { |
@@ -357,6 +363,8 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute | |||
357 | struct drm_i915_private *dev_priv = dev->dev_private; | 363 | struct drm_i915_private *dev_priv = dev->dev_private; |
358 | int ret; | 364 | int ret; |
359 | 365 | ||
366 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); | ||
367 | |||
360 | mutex_lock(&dev_priv->rps.hw_lock); | 368 | mutex_lock(&dev_priv->rps.hw_lock); |
361 | if (IS_VALLEYVIEW(dev_priv->dev)) | 369 | if (IS_VALLEYVIEW(dev_priv->dev)) |
362 | ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay); | 370 | ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay); |
@@ -381,6 +389,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, | |||
381 | if (ret) | 389 | if (ret) |
382 | return ret; | 390 | return ret; |
383 | 391 | ||
392 | flush_delayed_work(&dev_priv->rps.delayed_resume_work); | ||
393 | |||
384 | mutex_lock(&dev_priv->rps.hw_lock); | 394 | mutex_lock(&dev_priv->rps.hw_lock); |
385 | 395 | ||
386 | if (IS_VALLEYVIEW(dev)) { | 396 | if (IS_VALLEYVIEW(dev)) { |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 942b9acb0d8e..2e01bd3a5d8c 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -274,7 +274,7 @@ static void intel_crt_mode_set(struct intel_encoder *encoder) | |||
274 | struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; | 274 | struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; |
275 | u32 adpa; | 275 | u32 adpa; |
276 | 276 | ||
277 | if (HAS_PCH_SPLIT(dev)) | 277 | if (INTEL_INFO(dev)->gen >= 5) |
278 | adpa = ADPA_HOTPLUG_BITS; | 278 | adpa = ADPA_HOTPLUG_BITS; |
279 | else | 279 | else |
280 | adpa = 0; | 280 | adpa = 0; |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 6d335f8ca343..31f4fe271388 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -1345,6 +1345,41 @@ static const struct drm_encoder_funcs intel_ddi_funcs = { | |||
1345 | .destroy = intel_ddi_destroy, | 1345 | .destroy = intel_ddi_destroy, |
1346 | }; | 1346 | }; |
1347 | 1347 | ||
1348 | static struct intel_connector * | ||
1349 | intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port) | ||
1350 | { | ||
1351 | struct intel_connector *connector; | ||
1352 | enum port port = intel_dig_port->port; | ||
1353 | |||
1354 | connector = kzalloc(sizeof(*connector), GFP_KERNEL); | ||
1355 | if (!connector) | ||
1356 | return NULL; | ||
1357 | |||
1358 | intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); | ||
1359 | if (!intel_dp_init_connector(intel_dig_port, connector)) { | ||
1360 | kfree(connector); | ||
1361 | return NULL; | ||
1362 | } | ||
1363 | |||
1364 | return connector; | ||
1365 | } | ||
1366 | |||
1367 | static struct intel_connector * | ||
1368 | intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port) | ||
1369 | { | ||
1370 | struct intel_connector *connector; | ||
1371 | enum port port = intel_dig_port->port; | ||
1372 | |||
1373 | connector = kzalloc(sizeof(*connector), GFP_KERNEL); | ||
1374 | if (!connector) | ||
1375 | return NULL; | ||
1376 | |||
1377 | intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port); | ||
1378 | intel_hdmi_init_connector(intel_dig_port, connector); | ||
1379 | |||
1380 | return connector; | ||
1381 | } | ||
1382 | |||
1348 | void intel_ddi_init(struct drm_device *dev, enum port port) | 1383 | void intel_ddi_init(struct drm_device *dev, enum port port) |
1349 | { | 1384 | { |
1350 | struct drm_i915_private *dev_priv = dev->dev_private; | 1385 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -1369,12 +1404,6 @@ void intel_ddi_init(struct drm_device *dev, enum port port) | |||
1369 | if (!intel_dig_port) | 1404 | if (!intel_dig_port) |
1370 | return; | 1405 | return; |
1371 | 1406 | ||
1372 | dp_connector = kzalloc(sizeof(*dp_connector), GFP_KERNEL); | ||
1373 | if (!dp_connector) { | ||
1374 | kfree(intel_dig_port); | ||
1375 | return; | ||
1376 | } | ||
1377 | |||
1378 | intel_encoder = &intel_dig_port->base; | 1407 | intel_encoder = &intel_dig_port->base; |
1379 | encoder = &intel_encoder->base; | 1408 | encoder = &intel_encoder->base; |
1380 | 1409 | ||
@@ -1394,29 +1423,22 @@ void intel_ddi_init(struct drm_device *dev, enum port port) | |||
1394 | intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & | 1423 | intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & |
1395 | (DDI_BUF_PORT_REVERSAL | | 1424 | (DDI_BUF_PORT_REVERSAL | |
1396 | DDI_A_4_LANES); | 1425 | DDI_A_4_LANES); |
1397 | intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); | ||
1398 | 1426 | ||
1399 | intel_encoder->type = INTEL_OUTPUT_UNKNOWN; | 1427 | intel_encoder->type = INTEL_OUTPUT_UNKNOWN; |
1400 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); | 1428 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
1401 | intel_encoder->cloneable = false; | 1429 | intel_encoder->cloneable = false; |
1402 | intel_encoder->hot_plug = intel_ddi_hot_plug; | 1430 | intel_encoder->hot_plug = intel_ddi_hot_plug; |
1403 | 1431 | ||
1404 | if (init_dp && !intel_dp_init_connector(intel_dig_port, dp_connector)) { | 1432 | if (init_dp) |
1405 | drm_encoder_cleanup(encoder); | 1433 | dp_connector = intel_ddi_init_dp_connector(intel_dig_port); |
1406 | kfree(intel_dig_port); | ||
1407 | kfree(dp_connector); | ||
1408 | return; | ||
1409 | } | ||
1410 | 1434 | ||
1411 | /* In theory we don't need the encoder->type check, but leave it just in | 1435 | /* In theory we don't need the encoder->type check, but leave it just in |
1412 | * case we have some really bad VBTs... */ | 1436 | * case we have some really bad VBTs... */ |
1413 | if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) { | 1437 | if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) |
1414 | hdmi_connector = kzalloc(sizeof(*hdmi_connector), | 1438 | hdmi_connector = intel_ddi_init_hdmi_connector(intel_dig_port); |
1415 | GFP_KERNEL); | ||
1416 | if (!hdmi_connector) | ||
1417 | return; | ||
1418 | 1439 | ||
1419 | intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port); | 1440 | if (!dp_connector && !hdmi_connector) { |
1420 | intel_hdmi_init_connector(intel_dig_port, hdmi_connector); | 1441 | drm_encoder_cleanup(encoder); |
1442 | kfree(intel_dig_port); | ||
1421 | } | 1443 | } |
1422 | } | 1444 | } |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 617b963dfb67..8905f83166f2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -309,32 +309,30 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { | |||
309 | .p2_slow = 7, .p2_fast = 7 }, | 309 | .p2_slow = 7, .p2_fast = 7 }, |
310 | }; | 310 | }; |
311 | 311 | ||
312 | static const intel_limit_t intel_limits_vlv_dac = { | 312 | static const intel_limit_t intel_limits_vlv = { |
313 | .dot = { .min = 25000, .max = 270000 }, | 313 | /* |
314 | * These are the data rate limits (measured in fast clocks) | ||
315 | * since those are the strictest limits we have. The fast | ||
316 | * clock and actual rate limits are more relaxed, so checking | ||
317 | * them would make no difference. | ||
318 | */ | ||
319 | .dot = { .min = 25000 * 5, .max = 270000 * 5 }, | ||
314 | .vco = { .min = 4000000, .max = 6000000 }, | 320 | .vco = { .min = 4000000, .max = 6000000 }, |
315 | .n = { .min = 1, .max = 7 }, | 321 | .n = { .min = 1, .max = 7 }, |
316 | .m = { .min = 22, .max = 450 }, /* guess */ | ||
317 | .m1 = { .min = 2, .max = 3 }, | 322 | .m1 = { .min = 2, .max = 3 }, |
318 | .m2 = { .min = 11, .max = 156 }, | 323 | .m2 = { .min = 11, .max = 156 }, |
319 | .p = { .min = 10, .max = 30 }, | ||
320 | .p1 = { .min = 1, .max = 3 }, | ||
321 | .p2 = { .dot_limit = 270000, | ||
322 | .p2_slow = 2, .p2_fast = 20 }, | ||
323 | }; | ||
324 | |||
325 | static const intel_limit_t intel_limits_vlv_hdmi = { | ||
326 | .dot = { .min = 25000, .max = 270000 }, | ||
327 | .vco = { .min = 4000000, .max = 6000000 }, | ||
328 | .n = { .min = 1, .max = 7 }, | ||
329 | .m = { .min = 60, .max = 300 }, /* guess */ | ||
330 | .m1 = { .min = 2, .max = 3 }, | ||
331 | .m2 = { .min = 11, .max = 156 }, | ||
332 | .p = { .min = 10, .max = 30 }, | ||
333 | .p1 = { .min = 2, .max = 3 }, | 324 | .p1 = { .min = 2, .max = 3 }, |
334 | .p2 = { .dot_limit = 270000, | 325 | .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */ |
335 | .p2_slow = 2, .p2_fast = 20 }, | ||
336 | }; | 326 | }; |
337 | 327 | ||
328 | static void vlv_clock(int refclk, intel_clock_t *clock) | ||
329 | { | ||
330 | clock->m = clock->m1 * clock->m2; | ||
331 | clock->p = clock->p1 * clock->p2; | ||
332 | clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); | ||
333 | clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); | ||
334 | } | ||
335 | |||
338 | /** | 336 | /** |
339 | * Returns whether any output on the specified pipe is of the specified type | 337 | * Returns whether any output on the specified pipe is of the specified type |
340 | */ | 338 | */ |
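
The consolidated VLV limits are written in terms of the fast clock, which on this platform is five times the pixel clock, hence the * 5 on the dot range; vlv_clock() then derives m = m1*m2, p = p1*p2, vco = refclk*m/n and dot = vco/p with round-to-closest division. A standalone arithmetic check with one plausible divider set; the 100 MHz reference clock and the divider values are assumptions for the example, not values read from hardware:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

struct clock { int n, m1, m2, p1, p2, m, p, vco, dot; };

/* same arithmetic as vlv_clock() in the hunk above */
static void vlv_clock(int refclk, struct clock *c)
{
        c->m = c->m1 * c->m2;
        c->p = c->p1 * c->p2;
        c->vco = DIV_ROUND_CLOSEST(refclk * c->m, c->n);
        c->dot = DIV_ROUND_CLOSEST(c->vco, c->p);
}

int main(void)
{
        struct clock c = { .n = 1, .m1 = 2, .m2 = 27, .p1 = 3, .p2 = 2 };
        int refclk = 100000; /* kHz, assumed reference clock */

        vlv_clock(refclk, &c);
        /* c.dot is the fast clock; the pixel clock is a fifth of it */
        printf("vco = %d kHz, fast clock = %d kHz, pixel clock = %d kHz\n",
               c.vco, c.dot, c.dot / 5);
        return 0;
}
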
@@ -410,10 +408,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) | |||
410 | else | 408 | else |
411 | limit = &intel_limits_pineview_sdvo; | 409 | limit = &intel_limits_pineview_sdvo; |
412 | } else if (IS_VALLEYVIEW(dev)) { | 410 | } else if (IS_VALLEYVIEW(dev)) { |
413 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) | 411 | limit = &intel_limits_vlv; |
414 | limit = &intel_limits_vlv_dac; | ||
415 | else | ||
416 | limit = &intel_limits_vlv_hdmi; | ||
417 | } else if (!IS_GEN2(dev)) { | 412 | } else if (!IS_GEN2(dev)) { |
418 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | 413 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
419 | limit = &intel_limits_i9xx_lvds; | 414 | limit = &intel_limits_i9xx_lvds; |
@@ -435,8 +430,8 @@ static void pineview_clock(int refclk, intel_clock_t *clock) | |||
435 | { | 430 | { |
436 | clock->m = clock->m2 + 2; | 431 | clock->m = clock->m2 + 2; |
437 | clock->p = clock->p1 * clock->p2; | 432 | clock->p = clock->p1 * clock->p2; |
438 | clock->vco = refclk * clock->m / clock->n; | 433 | clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); |
439 | clock->dot = clock->vco / clock->p; | 434 | clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); |
440 | } | 435 | } |
441 | 436 | ||
442 | static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) | 437 | static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) |
@@ -448,8 +443,8 @@ static void i9xx_clock(int refclk, intel_clock_t *clock) | |||
448 | { | 443 | { |
449 | clock->m = i9xx_dpll_compute_m(clock); | 444 | clock->m = i9xx_dpll_compute_m(clock); |
450 | clock->p = clock->p1 * clock->p2; | 445 | clock->p = clock->p1 * clock->p2; |
451 | clock->vco = refclk * clock->m / (clock->n + 2); | 446 | clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2); |
452 | clock->dot = clock->vco / clock->p; | 447 | clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); |
453 | } | 448 | } |
454 | 449 | ||
455 | #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) | 450 | #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) |
@@ -462,20 +457,26 @@ static bool intel_PLL_is_valid(struct drm_device *dev, | |||
462 | const intel_limit_t *limit, | 457 | const intel_limit_t *limit, |
463 | const intel_clock_t *clock) | 458 | const intel_clock_t *clock) |
464 | { | 459 | { |
460 | if (clock->n < limit->n.min || limit->n.max < clock->n) | ||
461 | INTELPllInvalid("n out of range\n"); | ||
465 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) | 462 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) |
466 | INTELPllInvalid("p1 out of range\n"); | 463 | INTELPllInvalid("p1 out of range\n"); |
467 | if (clock->p < limit->p.min || limit->p.max < clock->p) | ||
468 | INTELPllInvalid("p out of range\n"); | ||
469 | if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) | 464 | if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) |
470 | INTELPllInvalid("m2 out of range\n"); | 465 | INTELPllInvalid("m2 out of range\n"); |
471 | if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) | 466 | if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) |
472 | INTELPllInvalid("m1 out of range\n"); | 467 | INTELPllInvalid("m1 out of range\n"); |
473 | if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev)) | 468 | |
474 | INTELPllInvalid("m1 <= m2\n"); | 469 | if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev)) |
475 | if (clock->m < limit->m.min || limit->m.max < clock->m) | 470 | if (clock->m1 <= clock->m2) |
476 | INTELPllInvalid("m out of range\n"); | 471 | INTELPllInvalid("m1 <= m2\n"); |
477 | if (clock->n < limit->n.min || limit->n.max < clock->n) | 472 | |
478 | INTELPllInvalid("n out of range\n"); | 473 | if (!IS_VALLEYVIEW(dev)) { |
474 | if (clock->p < limit->p.min || limit->p.max < clock->p) | ||
475 | INTELPllInvalid("p out of range\n"); | ||
476 | if (clock->m < limit->m.min || limit->m.max < clock->m) | ||
477 | INTELPllInvalid("m out of range\n"); | ||
478 | } | ||
479 | |||
479 | if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) | 480 | if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) |
480 | INTELPllInvalid("vco out of range\n"); | 481 | INTELPllInvalid("vco out of range\n"); |
481 | /* XXX: We may need to be checking "Dot clock" depending on the multiplier, | 482 | /* XXX: We may need to be checking "Dot clock" depending on the multiplier, |
@@ -669,68 +670,56 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
669 | int target, int refclk, intel_clock_t *match_clock, | 670 | int target, int refclk, intel_clock_t *match_clock, |
670 | intel_clock_t *best_clock) | 671 | intel_clock_t *best_clock) |
671 | { | 672 | { |
672 | u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2; | 673 | struct drm_device *dev = crtc->dev; |
673 | u32 m, n, fastclk; | 674 | intel_clock_t clock; |
674 | u32 updrate, minupdate, p; | 675 | unsigned int bestppm = 1000000; |
675 | unsigned long bestppm, ppm, absppm; | 676 | /* min update 19.2 MHz */ |
676 | int dotclk, flag; | 677 | int max_n = min(limit->n.max, refclk / 19200); |
677 | 678 | bool found = false; | |
678 | flag = 0; | 679 | |
679 | dotclk = target * 1000; | 680 | target *= 5; /* fast clock */ |
680 | bestppm = 1000000; | 681 | |
681 | ppm = absppm = 0; | 682 | memset(best_clock, 0, sizeof(*best_clock)); |
682 | fastclk = dotclk / (2*100); | ||
683 | updrate = 0; | ||
684 | minupdate = 19200; | ||
685 | n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0; | ||
686 | bestm1 = bestm2 = bestp1 = bestp2 = 0; | ||
687 | 683 | ||
688 | /* based on hardware requirement, prefer smaller n to precision */ | 684 | /* based on hardware requirement, prefer smaller n to precision */ |
689 | for (n = limit->n.min; n <= ((refclk) / minupdate); n++) { | 685 | for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { |
690 | updrate = refclk / n; | 686 | for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { |
691 | for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) { | 687 | for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow; |
692 | for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) { | 688 | clock.p2 -= clock.p2 > 10 ? 2 : 1) { |
693 | if (p2 > 10) | 689 | clock.p = clock.p1 * clock.p2; |
694 | p2 = p2 - 1; | ||
695 | p = p1 * p2; | ||
696 | /* based on hardware requirement, prefer bigger m1,m2 values */ | 690 | /* based on hardware requirement, prefer bigger m1,m2 values */ |
697 | for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) { | 691 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { |
698 | m2 = DIV_ROUND_CLOSEST(fastclk * p * n, refclk * m1); | 692 | unsigned int ppm, diff; |
699 | m = m1 * m2; | 693 | |
700 | vco = updrate * m; | 694 | clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, |
695 | refclk * clock.m1); | ||
701 | 696 | ||
702 | if (vco < limit->vco.min || vco >= limit->vco.max) | 697 | vlv_clock(refclk, &clock); |
698 | |||
699 | if (!intel_PLL_is_valid(dev, limit, | ||
700 | &clock)) | ||
703 | continue; | 701 | continue; |
704 | 702 | ||
705 | ppm = 1000000 * ((vco / p) - fastclk) / fastclk; | 703 | diff = abs(clock.dot - target); |
706 | absppm = (ppm > 0) ? ppm : (-ppm); | 704 | ppm = div_u64(1000000ULL * diff, target); |
707 | if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) { | 705 | |
706 | if (ppm < 100 && clock.p > best_clock->p) { | ||
708 | bestppm = 0; | 707 | bestppm = 0; |
709 | flag = 1; | 708 | *best_clock = clock; |
710 | } | 709 | found = true; |
711 | if (absppm < bestppm - 10) { | ||
712 | bestppm = absppm; | ||
713 | flag = 1; | ||
714 | } | 710 | } |
715 | if (flag) { | 711 | |
716 | bestn = n; | 712 | if (bestppm >= 10 && ppm < bestppm - 10) { |
717 | bestm1 = m1; | 713 | bestppm = ppm; |
718 | bestm2 = m2; | 714 | *best_clock = clock; |
719 | bestp1 = p1; | 715 | found = true; |
720 | bestp2 = p2; | ||
721 | flag = 0; | ||
722 | } | 716 | } |
723 | } | 717 | } |
724 | } | 718 | } |
725 | } | 719 | } |
726 | } | 720 | } |
727 | best_clock->n = bestn; | ||
728 | best_clock->m1 = bestm1; | ||
729 | best_clock->m2 = bestm2; | ||
730 | best_clock->p1 = bestp1; | ||
731 | best_clock->p2 = bestp2; | ||
732 | 721 | ||
733 | return true; | 722 | return found; |
734 | } | 723 | } |
735 | 724 | ||
736 | bool intel_crtc_active(struct drm_crtc *crtc) | 725 | bool intel_crtc_active(struct drm_crtc *crtc) |
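
The rewritten vlv_find_best_dpll() scores candidates by their deviation from the target fast clock in parts per million, preferring a candidate under 100 ppm with the largest post divider and otherwise keeping the lowest-ppm candidate with a 10 ppm hysteresis. A standalone sketch of just the ppm computation (plain 64-bit division standing in for div_u64):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* deviation of a candidate clock from the target, in parts per million */
static unsigned int clock_ppm(int dot, int target)
{
        unsigned int diff = abs(dot - target);

        return (unsigned int)(1000000ULL * diff / target);
}

int main(void)
{
        int target = 1350000; /* kHz: fast clock for a 270000 kHz pixel clock */

        printf("%u ppm\n", clock_ppm(1350000, target)); /* exact match: 0 */
        printf("%u ppm\n", clock_ppm(1350100, target)); /* ~74 ppm, within the 100 ppm window */
        printf("%u ppm\n", clock_ppm(1351500, target)); /* ~1111 ppm, kept only if nothing better */
        return 0;
}
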
@@ -811,6 +800,25 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) | |||
811 | DRM_DEBUG_KMS("vblank wait timed out\n"); | 800 | DRM_DEBUG_KMS("vblank wait timed out\n"); |
812 | } | 801 | } |
813 | 802 | ||
803 | static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) | ||
804 | { | ||
805 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
806 | u32 reg = PIPEDSL(pipe); | ||
807 | u32 line1, line2; | ||
808 | u32 line_mask; | ||
809 | |||
810 | if (IS_GEN2(dev)) | ||
811 | line_mask = DSL_LINEMASK_GEN2; | ||
812 | else | ||
813 | line_mask = DSL_LINEMASK_GEN3; | ||
814 | |||
815 | line1 = I915_READ(reg) & line_mask; | ||
816 | mdelay(5); | ||
817 | line2 = I915_READ(reg) & line_mask; | ||
818 | |||
819 | return line1 == line2; | ||
820 | } | ||
821 | |||
814 | /* | 822 | /* |
815 | * intel_wait_for_pipe_off - wait for pipe to turn off | 823 | * intel_wait_for_pipe_off - wait for pipe to turn off |
816 | * @dev: drm device | 824 | * @dev: drm device |
@@ -842,22 +850,8 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) | |||
842 | 100)) | 850 | 100)) |
843 | WARN(1, "pipe_off wait timed out\n"); | 851 | WARN(1, "pipe_off wait timed out\n"); |
844 | } else { | 852 | } else { |
845 | u32 last_line, line_mask; | ||
846 | int reg = PIPEDSL(pipe); | ||
847 | unsigned long timeout = jiffies + msecs_to_jiffies(100); | ||
848 | |||
849 | if (IS_GEN2(dev)) | ||
850 | line_mask = DSL_LINEMASK_GEN2; | ||
851 | else | ||
852 | line_mask = DSL_LINEMASK_GEN3; | ||
853 | |||
854 | /* Wait for the display line to settle */ | 853 | /* Wait for the display line to settle */ |
855 | do { | 854 | if (wait_for(pipe_dsl_stopped(dev, pipe), 100)) |
856 | last_line = I915_READ(reg) & line_mask; | ||
857 | mdelay(5); | ||
858 | } while (((I915_READ(reg) & line_mask) != last_line) && | ||
859 | time_after(timeout, jiffies)); | ||
860 | if (time_after(jiffies, timeout)) | ||
861 | WARN(1, "pipe_off wait timed out\n"); | 855 | WARN(1, "pipe_off wait timed out\n"); |
862 | } | 856 | } |
863 | } | 857 | } |
@@ -1823,63 +1817,75 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv, | |||
1823 | * Plane regs are double buffered, going from enabled->disabled needs a | 1817 | * Plane regs are double buffered, going from enabled->disabled needs a |
1824 | * trigger in order to latch. The display address reg provides this. | 1818 | * trigger in order to latch. The display address reg provides this. |
1825 | */ | 1819 | */ |
1826 | void intel_flush_display_plane(struct drm_i915_private *dev_priv, | 1820 | void intel_flush_primary_plane(struct drm_i915_private *dev_priv, |
1827 | enum plane plane) | 1821 | enum plane plane) |
1828 | { | 1822 | { |
1829 | if (dev_priv->info->gen >= 4) | 1823 | u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane); |
1830 | I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); | 1824 | |
1831 | else | 1825 | I915_WRITE(reg, I915_READ(reg)); |
1832 | I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); | 1826 | POSTING_READ(reg); |
1833 | } | 1827 | } |
1834 | 1828 | ||
1835 | /** | 1829 | /** |
1836 | * intel_enable_plane - enable a display plane on a given pipe | 1830 | * intel_enable_primary_plane - enable the primary plane on a given pipe |
1837 | * @dev_priv: i915 private structure | 1831 | * @dev_priv: i915 private structure |
1838 | * @plane: plane to enable | 1832 | * @plane: plane to enable |
1839 | * @pipe: pipe being fed | 1833 | * @pipe: pipe being fed |
1840 | * | 1834 | * |
1841 | * Enable @plane on @pipe, making sure that @pipe is running first. | 1835 | * Enable @plane on @pipe, making sure that @pipe is running first. |
1842 | */ | 1836 | */ |
1843 | static void intel_enable_plane(struct drm_i915_private *dev_priv, | 1837 | static void intel_enable_primary_plane(struct drm_i915_private *dev_priv, |
1844 | enum plane plane, enum pipe pipe) | 1838 | enum plane plane, enum pipe pipe) |
1845 | { | 1839 | { |
1840 | struct intel_crtc *intel_crtc = | ||
1841 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); | ||
1846 | int reg; | 1842 | int reg; |
1847 | u32 val; | 1843 | u32 val; |
1848 | 1844 | ||
1849 | /* If the pipe isn't enabled, we can't pump pixels and may hang */ | 1845 | /* If the pipe isn't enabled, we can't pump pixels and may hang */ |
1850 | assert_pipe_enabled(dev_priv, pipe); | 1846 | assert_pipe_enabled(dev_priv, pipe); |
1851 | 1847 | ||
1848 | WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n"); | ||
1849 | |||
1850 | intel_crtc->primary_enabled = true; | ||
1851 | |||
1852 | reg = DSPCNTR(plane); | 1852 | reg = DSPCNTR(plane); |
1853 | val = I915_READ(reg); | 1853 | val = I915_READ(reg); |
1854 | if (val & DISPLAY_PLANE_ENABLE) | 1854 | if (val & DISPLAY_PLANE_ENABLE) |
1855 | return; | 1855 | return; |
1856 | 1856 | ||
1857 | I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); | 1857 | I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); |
1858 | intel_flush_display_plane(dev_priv, plane); | 1858 | intel_flush_primary_plane(dev_priv, plane); |
1859 | intel_wait_for_vblank(dev_priv->dev, pipe); | 1859 | intel_wait_for_vblank(dev_priv->dev, pipe); |
1860 | } | 1860 | } |
1861 | 1861 | ||
1862 | /** | 1862 | /** |
1863 | * intel_disable_plane - disable a display plane | 1863 | * intel_disable_primary_plane - disable the primary plane |
1864 | * @dev_priv: i915 private structure | 1864 | * @dev_priv: i915 private structure |
1865 | * @plane: plane to disable | 1865 | * @plane: plane to disable |
1866 | * @pipe: pipe consuming the data | 1866 | * @pipe: pipe consuming the data |
1867 | * | 1867 | * |
1868 | * Disable @plane; should be an independent operation. | 1868 | * Disable @plane; should be an independent operation. |
1869 | */ | 1869 | */ |
1870 | static void intel_disable_plane(struct drm_i915_private *dev_priv, | 1870 | static void intel_disable_primary_plane(struct drm_i915_private *dev_priv, |
1871 | enum plane plane, enum pipe pipe) | 1871 | enum plane plane, enum pipe pipe) |
1872 | { | 1872 | { |
1873 | struct intel_crtc *intel_crtc = | ||
1874 | to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); | ||
1873 | int reg; | 1875 | int reg; |
1874 | u32 val; | 1876 | u32 val; |
1875 | 1877 | ||
1878 | WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n"); | ||
1879 | |||
1880 | intel_crtc->primary_enabled = false; | ||
1881 | |||
1876 | reg = DSPCNTR(plane); | 1882 | reg = DSPCNTR(plane); |
1877 | val = I915_READ(reg); | 1883 | val = I915_READ(reg); |
1878 | if ((val & DISPLAY_PLANE_ENABLE) == 0) | 1884 | if ((val & DISPLAY_PLANE_ENABLE) == 0) |
1879 | return; | 1885 | return; |
1880 | 1886 | ||
1881 | I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); | 1887 | I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); |
1882 | intel_flush_display_plane(dev_priv, plane); | 1888 | intel_flush_primary_plane(dev_priv, plane); |
1883 | intel_wait_for_vblank(dev_priv->dev, pipe); | 1889 | intel_wait_for_vblank(dev_priv->dev, pipe); |
1884 | } | 1890 | } |
1885 | 1891 | ||
@@ -1915,10 +1921,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, | |||
1915 | alignment = 0; | 1921 | alignment = 0; |
1916 | break; | 1922 | break; |
1917 | case I915_TILING_Y: | 1923 | case I915_TILING_Y: |
1918 | /* Despite that we check this in framebuffer_init userspace can | 1924 | WARN(1, "Y tiled bo slipped through, driver bug!\n"); |
1919 | * screw us over and change the tiling after the fact. Only | ||
1920 | * pinned buffers can't change their tiling. */ | ||
1921 | DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n"); | ||
1922 | return -EINVAL; | 1925 | return -EINVAL; |
1923 | default: | 1926 | default: |
1924 | BUG(); | 1927 | BUG(); |
@@ -3332,7 +3335,7 @@ static void intel_disable_planes(struct drm_crtc *crtc) | |||
3332 | intel_plane_disable(&intel_plane->base); | 3335 | intel_plane_disable(&intel_plane->base); |
3333 | } | 3336 | } |
3334 | 3337 | ||
3335 | static void hsw_enable_ips(struct intel_crtc *crtc) | 3338 | void hsw_enable_ips(struct intel_crtc *crtc) |
3336 | { | 3339 | { |
3337 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; | 3340 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
3338 | 3341 | ||
@@ -3345,9 +3348,17 @@ static void hsw_enable_ips(struct intel_crtc *crtc) | |||
3345 | * for a vblank, so all we need to do here is to enable the IPS bit. */ | 3348 | * for a vblank, so all we need to do here is to enable the IPS bit. */ |
3346 | assert_plane_enabled(dev_priv, crtc->plane); | 3349 | assert_plane_enabled(dev_priv, crtc->plane); |
3347 | I915_WRITE(IPS_CTL, IPS_ENABLE); | 3350 | I915_WRITE(IPS_CTL, IPS_ENABLE); |
3351 | |||
3352 | /* The bit only becomes 1 in the next vblank, so this wait here is | ||
3353 | * essentially intel_wait_for_vblank. If we don't have this and don't | ||
3354 | * wait for vblanks until the end of crtc_enable, then the HW state | ||
3355 | * readout code will complain that the expected IPS_CTL value is not the | ||
3356 | * one we read. */ | ||
3357 | if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50)) | ||
3358 | DRM_ERROR("Timed out waiting for IPS enable\n"); | ||
3348 | } | 3359 | } |
3349 | 3360 | ||
3350 | static void hsw_disable_ips(struct intel_crtc *crtc) | 3361 | void hsw_disable_ips(struct intel_crtc *crtc) |
3351 | { | 3362 | { |
3352 | struct drm_device *dev = crtc->base.dev; | 3363 | struct drm_device *dev = crtc->base.dev; |
3353 | struct drm_i915_private *dev_priv = dev->dev_private; | 3364 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -3454,7 +3465,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
3454 | intel_update_watermarks(crtc); | 3465 | intel_update_watermarks(crtc); |
3455 | intel_enable_pipe(dev_priv, pipe, | 3466 | intel_enable_pipe(dev_priv, pipe, |
3456 | intel_crtc->config.has_pch_encoder, false); | 3467 | intel_crtc->config.has_pch_encoder, false); |
3457 | intel_enable_plane(dev_priv, plane, pipe); | 3468 | intel_enable_primary_plane(dev_priv, plane, pipe); |
3458 | intel_enable_planes(crtc); | 3469 | intel_enable_planes(crtc); |
3459 | intel_crtc_update_cursor(crtc, true); | 3470 | intel_crtc_update_cursor(crtc, true); |
3460 | 3471 | ||
@@ -3496,7 +3507,7 @@ static void haswell_crtc_enable_planes(struct drm_crtc *crtc) | |||
3496 | int pipe = intel_crtc->pipe; | 3507 | int pipe = intel_crtc->pipe; |
3497 | int plane = intel_crtc->plane; | 3508 | int plane = intel_crtc->plane; |
3498 | 3509 | ||
3499 | intel_enable_plane(dev_priv, plane, pipe); | 3510 | intel_enable_primary_plane(dev_priv, plane, pipe); |
3500 | intel_enable_planes(crtc); | 3511 | intel_enable_planes(crtc); |
3501 | intel_crtc_update_cursor(crtc, true); | 3512 | intel_crtc_update_cursor(crtc, true); |
3502 | 3513 | ||
@@ -3526,7 +3537,7 @@ static void haswell_crtc_disable_planes(struct drm_crtc *crtc) | |||
3526 | 3537 | ||
3527 | intel_crtc_update_cursor(crtc, false); | 3538 | intel_crtc_update_cursor(crtc, false); |
3528 | intel_disable_planes(crtc); | 3539 | intel_disable_planes(crtc); |
3529 | intel_disable_plane(dev_priv, plane, pipe); | 3540 | intel_disable_primary_plane(dev_priv, plane, pipe); |
3530 | } | 3541 | } |
3531 | 3542 | ||
3532 | /* | 3543 | /* |
@@ -3665,7 +3676,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
3665 | 3676 | ||
3666 | intel_crtc_update_cursor(crtc, false); | 3677 | intel_crtc_update_cursor(crtc, false); |
3667 | intel_disable_planes(crtc); | 3678 | intel_disable_planes(crtc); |
3668 | intel_disable_plane(dev_priv, plane, pipe); | 3679 | intel_disable_primary_plane(dev_priv, plane, pipe); |
3669 | 3680 | ||
3670 | if (intel_crtc->config.has_pch_encoder) | 3681 | if (intel_crtc->config.has_pch_encoder) |
3671 | intel_set_pch_fifo_underrun_reporting(dev, pipe, false); | 3682 | intel_set_pch_fifo_underrun_reporting(dev, pipe, false); |
@@ -3873,7 +3884,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) | |||
3873 | 3884 | ||
3874 | intel_update_watermarks(crtc); | 3885 | intel_update_watermarks(crtc); |
3875 | intel_enable_pipe(dev_priv, pipe, false, is_dsi); | 3886 | intel_enable_pipe(dev_priv, pipe, false, is_dsi); |
3876 | intel_enable_plane(dev_priv, plane, pipe); | 3887 | intel_enable_primary_plane(dev_priv, plane, pipe); |
3877 | intel_enable_planes(crtc); | 3888 | intel_enable_planes(crtc); |
3878 | intel_crtc_update_cursor(crtc, true); | 3889 | intel_crtc_update_cursor(crtc, true); |
3879 | 3890 | ||
@@ -3911,7 +3922,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) | |||
3911 | 3922 | ||
3912 | intel_update_watermarks(crtc); | 3923 | intel_update_watermarks(crtc); |
3913 | intel_enable_pipe(dev_priv, pipe, false, false); | 3924 | intel_enable_pipe(dev_priv, pipe, false, false); |
3914 | intel_enable_plane(dev_priv, plane, pipe); | 3925 | intel_enable_primary_plane(dev_priv, plane, pipe); |
3915 | intel_enable_planes(crtc); | 3926 | intel_enable_planes(crtc); |
3916 | /* The fixup needs to happen before cursor is enabled */ | 3927 | /* The fixup needs to happen before cursor is enabled */ |
3917 | if (IS_G4X(dev)) | 3928 | if (IS_G4X(dev)) |
@@ -3967,7 +3978,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) | |||
3967 | intel_crtc_dpms_overlay(intel_crtc, false); | 3978 | intel_crtc_dpms_overlay(intel_crtc, false); |
3968 | intel_crtc_update_cursor(crtc, false); | 3979 | intel_crtc_update_cursor(crtc, false); |
3969 | intel_disable_planes(crtc); | 3980 | intel_disable_planes(crtc); |
3970 | intel_disable_plane(dev_priv, plane, pipe); | 3981 | intel_disable_primary_plane(dev_priv, plane, pipe); |
3971 | 3982 | ||
3972 | intel_disable_pipe(dev_priv, pipe); | 3983 | intel_disable_pipe(dev_priv, pipe); |
3973 | 3984 | ||
@@ -5201,10 +5212,10 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc, | |||
5201 | clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; | 5212 | clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; |
5202 | clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; | 5213 | clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; |
5203 | 5214 | ||
5204 | clock.vco = refclk * clock.m1 * clock.m2 / clock.n; | 5215 | vlv_clock(refclk, &clock); |
5205 | clock.dot = 2 * clock.vco / (clock.p1 * clock.p2); | ||
5206 | 5216 | ||
5207 | pipe_config->port_clock = clock.dot / 10; | 5217 | /* clock.dot is the fast clock */ |
5218 | pipe_config->port_clock = clock.dot / 5; | ||
5208 | } | 5219 | } |
5209 | 5220 | ||
5210 | static bool i9xx_get_pipe_config(struct intel_crtc *crtc, | 5221 | static bool i9xx_get_pipe_config(struct intel_crtc *crtc, |
@@ -6061,11 +6072,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
6061 | else | 6072 | else |
6062 | intel_crtc->lowfreq_avail = false; | 6073 | intel_crtc->lowfreq_avail = false; |
6063 | 6074 | ||
6064 | if (intel_crtc->config.has_pch_encoder) { | ||
6065 | pll = intel_crtc_to_shared_dpll(intel_crtc); | ||
6066 | |||
6067 | } | ||
6068 | |||
6069 | intel_set_pipe_timings(intel_crtc); | 6075 | intel_set_pipe_timings(intel_crtc); |
6070 | 6076 | ||
6071 | if (intel_crtc->config.has_pch_encoder) { | 6077 | if (intel_crtc->config.has_pch_encoder) { |
@@ -6712,6 +6718,44 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
6712 | return 0; | 6718 | return 0; |
6713 | } | 6719 | } |
6714 | 6720 | ||
6721 | static struct { | ||
6722 | int clock; | ||
6723 | u32 config; | ||
6724 | } hdmi_audio_clock[] = { | ||
6725 | { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 }, | ||
6726 | { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */ | ||
6727 | { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 }, | ||
6728 | { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 }, | ||
6729 | { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 }, | ||
6730 | { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 }, | ||
6731 | { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 }, | ||
6732 | { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 }, | ||
6733 | { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 }, | ||
6734 | { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 }, | ||
6735 | }; | ||
6736 | |||
6737 | /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ | ||
6738 | static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode) | ||
6739 | { | ||
6740 | int i; | ||
6741 | |||
6742 | for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) { | ||
6743 | if (mode->clock == hdmi_audio_clock[i].clock) | ||
6744 | break; | ||
6745 | } | ||
6746 | |||
6747 | if (i == ARRAY_SIZE(hdmi_audio_clock)) { | ||
6748 | DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock); | ||
6749 | i = 1; | ||
6750 | } | ||
6751 | |||
6752 | DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n", | ||
6753 | hdmi_audio_clock[i].clock, | ||
6754 | hdmi_audio_clock[i].config); | ||
6755 | |||
6756 | return hdmi_audio_clock[i].config; | ||
6757 | } | ||
6758 | |||
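Annotation: the table and lookup added above map the mode's pixel clock (in kHz) to an AUD_CONFIG_PIXEL_CLOCK_HDMI_* encoding, falling back to the 25.200 MHz entry (index 1) when no entry matches. A minimal standalone sketch of the same linear search plus fallback; the config values here are placeholders, since the real register bit definitions live in i915_reg.h and are not part of this hunk.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static const struct { int clock; unsigned int config; } hdmi_audio_clock[] = {
	{ DIV_ROUND_UP(25200 * 1000, 1001), 0x0 },	/* 25.175 MHz */
	{ 25200, 0x1 },					/* default per bspec */
	{ 27000, 0x2 },
	{ 27000 * 1001 / 1000, 0x3 },
	{ 54000, 0x4 },
	{ 54000 * 1001 / 1000, 0x5 },
	{ DIV_ROUND_UP(74250 * 1000, 1001), 0x6 },
	{ 74250, 0x7 },
	{ DIV_ROUND_UP(148500 * 1000, 1001), 0x8 },
	{ 148500, 0x9 },
};

static unsigned int hdmi_audio_config(int clock_khz)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++)
		if (clock_khz == hdmi_audio_clock[i].clock)
			return hdmi_audio_clock[i].config;

	return hdmi_audio_clock[1].config;	/* unknown clock: fall back to the 25.200 MHz entry */
}

int main(void)
{
	printf("74250 -> %#x, 60000 -> %#x (fallback)\n",
	       hdmi_audio_config(74250), hdmi_audio_config(60000));
	return 0;
}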
6715 | static bool intel_eld_uptodate(struct drm_connector *connector, | 6759 | static bool intel_eld_uptodate(struct drm_connector *connector, |
6716 | int reg_eldv, uint32_t bits_eldv, | 6760 | int reg_eldv, uint32_t bits_eldv, |
6717 | int reg_elda, uint32_t bits_elda, | 6761 | int reg_elda, uint32_t bits_elda, |
@@ -6742,7 +6786,8 @@ static bool intel_eld_uptodate(struct drm_connector *connector, | |||
6742 | } | 6786 | } |
6743 | 6787 | ||
6744 | static void g4x_write_eld(struct drm_connector *connector, | 6788 | static void g4x_write_eld(struct drm_connector *connector, |
6745 | struct drm_crtc *crtc) | 6789 | struct drm_crtc *crtc, |
6790 | struct drm_display_mode *mode) | ||
6746 | { | 6791 | { |
6747 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 6792 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
6748 | uint8_t *eld = connector->eld; | 6793 | uint8_t *eld = connector->eld; |
@@ -6782,7 +6827,8 @@ static void g4x_write_eld(struct drm_connector *connector, | |||
6782 | } | 6827 | } |
6783 | 6828 | ||
6784 | static void haswell_write_eld(struct drm_connector *connector, | 6829 | static void haswell_write_eld(struct drm_connector *connector, |
6785 | struct drm_crtc *crtc) | 6830 | struct drm_crtc *crtc, |
6831 | struct drm_display_mode *mode) | ||
6786 | { | 6832 | { |
6787 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 6833 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
6788 | uint8_t *eld = connector->eld; | 6834 | uint8_t *eld = connector->eld; |
@@ -6835,8 +6881,9 @@ static void haswell_write_eld(struct drm_connector *connector, | |||
6835 | DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); | 6881 | DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); |
6836 | eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ | 6882 | eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ |
6837 | I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ | 6883 | I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ |
6838 | } else | 6884 | } else { |
6839 | I915_WRITE(aud_config, 0); | 6885 | I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode)); |
6886 | } | ||
6840 | 6887 | ||
6841 | if (intel_eld_uptodate(connector, | 6888 | if (intel_eld_uptodate(connector, |
6842 | aud_cntrl_st2, eldv, | 6889 | aud_cntrl_st2, eldv, |
@@ -6869,7 +6916,8 @@ static void haswell_write_eld(struct drm_connector *connector, | |||
6869 | } | 6916 | } |
6870 | 6917 | ||
6871 | static void ironlake_write_eld(struct drm_connector *connector, | 6918 | static void ironlake_write_eld(struct drm_connector *connector, |
6872 | struct drm_crtc *crtc) | 6919 | struct drm_crtc *crtc, |
6920 | struct drm_display_mode *mode) | ||
6873 | { | 6921 | { |
6874 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 6922 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
6875 | uint8_t *eld = connector->eld; | 6923 | uint8_t *eld = connector->eld; |
@@ -6913,8 +6961,9 @@ static void ironlake_write_eld(struct drm_connector *connector, | |||
6913 | DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); | 6961 | DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); |
6914 | eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ | 6962 | eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ |
6915 | I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ | 6963 | I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */ |
6916 | } else | 6964 | } else { |
6917 | I915_WRITE(aud_config, 0); | 6965 | I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode)); |
6966 | } | ||
6918 | 6967 | ||
6919 | if (intel_eld_uptodate(connector, | 6968 | if (intel_eld_uptodate(connector, |
6920 | aud_cntrl_st2, eldv, | 6969 | aud_cntrl_st2, eldv, |
@@ -6964,7 +7013,7 @@ void intel_write_eld(struct drm_encoder *encoder, | |||
6964 | connector->eld[6] = drm_av_sync_delay(connector, mode) / 2; | 7013 | connector->eld[6] = drm_av_sync_delay(connector, mode) / 2; |
6965 | 7014 | ||
6966 | if (dev_priv->display.write_eld) | 7015 | if (dev_priv->display.write_eld) |
6967 | dev_priv->display.write_eld(connector, crtc); | 7016 | dev_priv->display.write_eld(connector, crtc, mode); |
6968 | } | 7017 | } |
6969 | 7018 | ||
6970 | static void i845_update_cursor(struct drm_crtc *crtc, u32 base) | 7019 | static void i845_update_cursor(struct drm_crtc *crtc, u32 base) |
@@ -7271,14 +7320,21 @@ intel_framebuffer_create(struct drm_device *dev, | |||
7271 | return ERR_PTR(-ENOMEM); | 7320 | return ERR_PTR(-ENOMEM); |
7272 | } | 7321 | } |
7273 | 7322 | ||
7323 | ret = i915_mutex_lock_interruptible(dev); | ||
7324 | if (ret) | ||
7325 | goto err; | ||
7326 | |||
7274 | ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); | 7327 | ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); |
7275 | if (ret) { | 7328 | mutex_unlock(&dev->struct_mutex); |
7276 | drm_gem_object_unreference_unlocked(&obj->base); | 7329 | if (ret) |
7277 | kfree(intel_fb); | 7330 | goto err; |
7278 | return ERR_PTR(ret); | ||
7279 | } | ||
7280 | 7331 | ||
7281 | return &intel_fb->base; | 7332 | return &intel_fb->base; |
7333 | err: | ||
7334 | drm_gem_object_unreference_unlocked(&obj->base); | ||
7335 | kfree(intel_fb); | ||
7336 | |||
7337 | return ERR_PTR(ret); | ||
7282 | } | 7338 | } |
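Annotation: intel_framebuffer_create() now takes struct_mutex interruptibly before calling intel_framebuffer_init() (which a later hunk asserts with WARN_ON(!mutex_is_locked())), drops the lock right after, and funnels both failure paths through a single err: label that releases the GEM reference and frees the wrapper. A reduced sketch of that acquire / init / unlock / goto-err shape; the fake_* helpers are stand-ins, not i915 APIs.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_fb { int dummy; };

static int fake_lock_interruptible(void) { return 0; }	/* i915_mutex_lock_interruptible() stand-in */
static void fake_unlock(void) { }
static int fake_fb_init(struct fake_fb *fb) { (void)fb; return -EINVAL; }	/* force the error path */
static void fake_obj_put(void) { puts("dropped object reference"); }

static struct fake_fb *create_fb(void)
{
	struct fake_fb *fb;
	int ret;

	fb = calloc(1, sizeof(*fb));
	if (!fb)
		return NULL;

	ret = fake_lock_interruptible();
	if (ret)
		goto err;

	ret = fake_fb_init(fb);		/* intel_framebuffer_init() runs under the lock */
	fake_unlock();
	if (ret)
		goto err;

	return fb;

err:
	fake_obj_put();			/* drm_gem_object_unreference_unlocked() in the patch */
	free(fb);			/* kfree(intel_fb) */
	return NULL;			/* the driver returns ERR_PTR(ret) instead */
}

int main(void)
{
	if (!create_fb())
		puts("create_fb failed, resources released on the err: path");
	return 0;
}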
7283 | 7339 | ||
7284 | static u32 | 7340 | static u32 |
@@ -7321,6 +7377,7 @@ static struct drm_framebuffer * | |||
7321 | mode_fits_in_fbdev(struct drm_device *dev, | 7377 | mode_fits_in_fbdev(struct drm_device *dev, |
7322 | struct drm_display_mode *mode) | 7378 | struct drm_display_mode *mode) |
7323 | { | 7379 | { |
7380 | #ifdef CONFIG_DRM_I915_FBDEV | ||
7324 | struct drm_i915_private *dev_priv = dev->dev_private; | 7381 | struct drm_i915_private *dev_priv = dev->dev_private; |
7325 | struct drm_i915_gem_object *obj; | 7382 | struct drm_i915_gem_object *obj; |
7326 | struct drm_framebuffer *fb; | 7383 | struct drm_framebuffer *fb; |
@@ -7341,6 +7398,9 @@ mode_fits_in_fbdev(struct drm_device *dev, | |||
7341 | return NULL; | 7398 | return NULL; |
7342 | 7399 | ||
7343 | return fb; | 7400 | return fb; |
7401 | #else | ||
7402 | return NULL; | ||
7403 | #endif | ||
7344 | } | 7404 | } |
7345 | 7405 | ||
7346 | bool intel_get_load_detect_pipe(struct drm_connector *connector, | 7406 | bool intel_get_load_detect_pipe(struct drm_connector *connector, |
@@ -9866,7 +9926,13 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
9866 | if (I915_READ(PCH_DP_D) & DP_DETECTED) | 9926 | if (I915_READ(PCH_DP_D) & DP_DETECTED) |
9867 | intel_dp_init(dev, PCH_DP_D, PORT_D); | 9927 | intel_dp_init(dev, PCH_DP_D, PORT_D); |
9868 | } else if (IS_VALLEYVIEW(dev)) { | 9928 | } else if (IS_VALLEYVIEW(dev)) { |
9869 | /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ | 9929 | if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) { |
9930 | intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB, | ||
9931 | PORT_B); | ||
9932 | if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED) | ||
9933 | intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); | ||
9934 | } | ||
9935 | |||
9870 | if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) { | 9936 | if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) { |
9871 | intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, | 9937 | intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, |
9872 | PORT_C); | 9938 | PORT_C); |
@@ -9875,13 +9941,6 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
9875 | PORT_C); | 9941 | PORT_C); |
9876 | } | 9942 | } |
9877 | 9943 | ||
9878 | if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) { | ||
9879 | intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB, | ||
9880 | PORT_B); | ||
9881 | if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED) | ||
9882 | intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); | ||
9883 | } | ||
9884 | |||
9885 | intel_dsi_init(dev); | 9944 | intel_dsi_init(dev); |
9886 | } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { | 9945 | } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { |
9887 | bool found = false; | 9946 | bool found = false; |
@@ -9938,6 +9997,7 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
9938 | void intel_framebuffer_fini(struct intel_framebuffer *fb) | 9997 | void intel_framebuffer_fini(struct intel_framebuffer *fb) |
9939 | { | 9998 | { |
9940 | drm_framebuffer_cleanup(&fb->base); | 9999 | drm_framebuffer_cleanup(&fb->base); |
10000 | WARN_ON(!fb->obj->framebuffer_references--); | ||
9941 | drm_gem_object_unreference_unlocked(&fb->obj->base); | 10001 | drm_gem_object_unreference_unlocked(&fb->obj->base); |
9942 | } | 10002 | } |
9943 | 10003 | ||
@@ -9969,9 +10029,12 @@ int intel_framebuffer_init(struct drm_device *dev, | |||
9969 | struct drm_mode_fb_cmd2 *mode_cmd, | 10029 | struct drm_mode_fb_cmd2 *mode_cmd, |
9970 | struct drm_i915_gem_object *obj) | 10030 | struct drm_i915_gem_object *obj) |
9971 | { | 10031 | { |
10032 | int aligned_height, tile_height; | ||
9972 | int pitch_limit; | 10033 | int pitch_limit; |
9973 | int ret; | 10034 | int ret; |
9974 | 10035 | ||
10036 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
10037 | |||
9975 | if (obj->tiling_mode == I915_TILING_Y) { | 10038 | if (obj->tiling_mode == I915_TILING_Y) { |
9976 | DRM_DEBUG("hardware does not support tiling Y\n"); | 10039 | DRM_DEBUG("hardware does not support tiling Y\n"); |
9977 | return -EINVAL; | 10040 | return -EINVAL; |
@@ -10060,8 +10123,16 @@ int intel_framebuffer_init(struct drm_device *dev, | |||
10060 | if (mode_cmd->offsets[0] != 0) | 10123 | if (mode_cmd->offsets[0] != 0) |
10061 | return -EINVAL; | 10124 | return -EINVAL; |
10062 | 10125 | ||
10126 | tile_height = IS_GEN2(dev) ? 16 : 8; | ||
10127 | aligned_height = ALIGN(mode_cmd->height, | ||
10128 | obj->tiling_mode ? tile_height : 1); | ||
10129 | /* FIXME drm helper for size checks (especially planar formats)? */ | ||
10130 | if (obj->base.size < aligned_height * mode_cmd->pitches[0]) | ||
10131 | return -EINVAL; | ||
10132 | |||
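Annotation: the new size check rounds the requested height up to the tile height (16 rows on gen2, 8 otherwise) before comparing height * pitch against the object size, so a tiled object that covers the visible lines but not the final partial tile row is rejected with -EINVAL. A small worked example with illustrative numbers.

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))	/* matches the kernel macro for power-of-two a */

int main(void)
{
	int pitch = 7680;		/* e.g. 1920 pixels at 4 bytes/pixel */
	int tile_height = 8;		/* non-gen2, tiled object */

	long need_exact  = (long)ALIGN(1080, tile_height) * pitch;	/* 1080 is already a multiple of 8 */
	long need_padded = (long)ALIGN(1081, tile_height) * pitch;	/* pads up to 1088 rows */

	printf("1080 lines need %ld bytes, 1081 lines need %ld bytes\n",
	       need_exact, need_padded);
	/* An object smaller than the padded size now fails the -EINVAL check above. */
	return 0;
}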
10063 | drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); | 10133 | drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); |
10064 | intel_fb->obj = obj; | 10134 | intel_fb->obj = obj; |
10135 | intel_fb->obj->framebuffer_references++; | ||
10065 | 10136 | ||
10066 | ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); | 10137 | ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); |
10067 | if (ret) { | 10138 | if (ret) { |
@@ -10087,9 +10158,15 @@ intel_user_framebuffer_create(struct drm_device *dev, | |||
10087 | return intel_framebuffer_create(dev, mode_cmd, obj); | 10158 | return intel_framebuffer_create(dev, mode_cmd, obj); |
10088 | } | 10159 | } |
10089 | 10160 | ||
10161 | #ifndef CONFIG_DRM_I915_FBDEV | ||
10162 | static inline void intel_fbdev_output_poll_changed(struct drm_device *dev) | ||
10163 | { | ||
10164 | } | ||
10165 | #endif | ||
10166 | |||
10090 | static const struct drm_mode_config_funcs intel_mode_funcs = { | 10167 | static const struct drm_mode_config_funcs intel_mode_funcs = { |
10091 | .fb_create = intel_user_framebuffer_create, | 10168 | .fb_create = intel_user_framebuffer_create, |
10092 | .output_poll_changed = intel_fb_output_poll_changed, | 10169 | .output_poll_changed = intel_fbdev_output_poll_changed, |
10093 | }; | 10170 | }; |
10094 | 10171 | ||
10095 | /* Set up chip specific display functions */ | 10172 | /* Set up chip specific display functions */ |
@@ -10304,8 +10381,7 @@ static struct intel_quirk intel_quirks[] = { | |||
10304 | /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ | 10381 | /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ |
10305 | { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, | 10382 | { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, |
10306 | 10383 | ||
10307 | /* 830/845 need to leave pipe A & dpll A up */ | 10384 | /* 830 needs to leave pipe A & dpll A up */ |
10308 | { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, | ||
10309 | { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, | 10385 | { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, |
10310 | 10386 | ||
10311 | /* Lenovo U160 cannot use SSC on LVDS */ | 10387 | /* Lenovo U160 cannot use SSC on LVDS */ |
@@ -10674,7 +10750,7 @@ void i915_redisable_vga(struct drm_device *dev) | |||
10674 | (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0) | 10750 | (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0) |
10675 | return; | 10751 | return; |
10676 | 10752 | ||
10677 | if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { | 10753 | if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { |
10678 | DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); | 10754 | DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); |
10679 | i915_disable_vga(dev); | 10755 | i915_disable_vga(dev); |
10680 | i915_disable_vga_mem(dev); | 10756 | i915_disable_vga_mem(dev); |
@@ -10698,6 +10774,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) | |||
10698 | &crtc->config); | 10774 | &crtc->config); |
10699 | 10775 | ||
10700 | crtc->base.enabled = crtc->active; | 10776 | crtc->base.enabled = crtc->active; |
10777 | crtc->primary_enabled = crtc->active; | ||
10701 | 10778 | ||
10702 | DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", | 10779 | DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", |
10703 | crtc->base.base.id, | 10780 | crtc->base.base.id, |
@@ -10738,11 +10815,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) | |||
10738 | } | 10815 | } |
10739 | 10816 | ||
10740 | encoder->connectors_active = false; | 10817 | encoder->connectors_active = false; |
10741 | DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n", | 10818 | DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", |
10742 | encoder->base.base.id, | 10819 | encoder->base.base.id, |
10743 | drm_get_encoder_name(&encoder->base), | 10820 | drm_get_encoder_name(&encoder->base), |
10744 | encoder->base.crtc ? "enabled" : "disabled", | 10821 | encoder->base.crtc ? "enabled" : "disabled", |
10745 | pipe); | 10822 | pipe_name(pipe)); |
10746 | } | 10823 | } |
10747 | 10824 | ||
10748 | list_for_each_entry(connector, &dev->mode_config.connector_list, | 10825 | list_for_each_entry(connector, &dev->mode_config.connector_list, |
@@ -10815,6 +10892,9 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, | |||
10815 | pll->on = false; | 10892 | pll->on = false; |
10816 | } | 10893 | } |
10817 | 10894 | ||
10895 | if (IS_HASWELL(dev)) | ||
10896 | ilk_wm_get_hw_state(dev); | ||
10897 | |||
10818 | if (force_restore) { | 10898 | if (force_restore) { |
10819 | i915_redisable_vga(dev); | 10899 | i915_redisable_vga(dev); |
10820 | 10900 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d5bd349105e5..1e3d2720d811 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -822,10 +822,11 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
822 | /* Walk through all bpp values. Luckily they're all nicely spaced with 2 | 822 | /* Walk through all bpp values. Luckily they're all nicely spaced with 2 |
823 | * bpc in between. */ | 823 | * bpc in between. */ |
824 | bpp = pipe_config->pipe_bpp; | 824 | bpp = pipe_config->pipe_bpp; |
825 | if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) { | 825 | if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp && |
826 | dev_priv->vbt.edp_bpp < bpp) { | ||
826 | DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", | 827 | DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", |
827 | dev_priv->vbt.edp_bpp); | 828 | dev_priv->vbt.edp_bpp); |
828 | bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp); | 829 | bpp = dev_priv->vbt.edp_bpp; |
829 | } | 830 | } |
830 | 831 | ||
831 | for (; bpp >= 6*3; bpp -= 2*3) { | 832 | for (; bpp >= 6*3; bpp -= 2*3) { |
@@ -2095,7 +2096,8 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp) | |||
2095 | } | 2096 | } |
2096 | 2097 | ||
2097 | static void | 2098 | static void |
2098 | intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) | 2099 | intel_get_adjust_train(struct intel_dp *intel_dp, |
2100 | const uint8_t link_status[DP_LINK_STATUS_SIZE]) | ||
2099 | { | 2101 | { |
2100 | uint8_t v = 0; | 2102 | uint8_t v = 0; |
2101 | uint8_t p = 0; | 2103 | uint8_t p = 0; |
@@ -2297,7 +2299,8 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, | |||
2297 | struct drm_device *dev = intel_dig_port->base.base.dev; | 2299 | struct drm_device *dev = intel_dig_port->base.base.dev; |
2298 | struct drm_i915_private *dev_priv = dev->dev_private; | 2300 | struct drm_i915_private *dev_priv = dev->dev_private; |
2299 | enum port port = intel_dig_port->port; | 2301 | enum port port = intel_dig_port->port; |
2300 | int ret; | 2302 | uint8_t buf[sizeof(intel_dp->train_set) + 1]; |
2303 | int ret, len; | ||
2301 | 2304 | ||
2302 | if (HAS_DDI(dev)) { | 2305 | if (HAS_DDI(dev)) { |
2303 | uint32_t temp = I915_READ(DP_TP_CTL(port)); | 2306 | uint32_t temp = I915_READ(DP_TP_CTL(port)); |
@@ -2367,36 +2370,35 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, | |||
2367 | I915_WRITE(intel_dp->output_reg, *DP); | 2370 | I915_WRITE(intel_dp->output_reg, *DP); |
2368 | POSTING_READ(intel_dp->output_reg); | 2371 | POSTING_READ(intel_dp->output_reg); |
2369 | 2372 | ||
2370 | ret = intel_dp_aux_native_write_1(intel_dp, DP_TRAINING_PATTERN_SET, | 2373 | buf[0] = dp_train_pat; |
2371 | dp_train_pat); | 2374 | if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) == |
2372 | if (ret != 1) | ||
2373 | return false; | ||
2374 | |||
2375 | if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) != | ||
2376 | DP_TRAINING_PATTERN_DISABLE) { | 2375 | DP_TRAINING_PATTERN_DISABLE) { |
2377 | ret = intel_dp_aux_native_write(intel_dp, | 2376 | /* don't write DP_TRAINING_LANEx_SET on disable */ |
2378 | DP_TRAINING_LANE0_SET, | 2377 | len = 1; |
2379 | intel_dp->train_set, | 2378 | } else { |
2380 | intel_dp->lane_count); | 2379 | /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */ |
2381 | if (ret != intel_dp->lane_count) | 2380 | memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count); |
2382 | return false; | 2381 | len = intel_dp->lane_count + 1; |
2383 | } | 2382 | } |
2384 | 2383 | ||
2385 | return true; | 2384 | ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET, |
2385 | buf, len); | ||
2386 | |||
2387 | return ret == len; | ||
2386 | } | 2388 | } |
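Annotation: instead of writing DP_TRAINING_PATTERN_SET and then DP_TRAINING_LANEx_SET as two aux transactions, the reworked intel_dp_set_link_train() builds one buffer (the pattern byte followed by up to four per-lane drive settings) and issues a single native aux write; when the pattern is being disabled only the first byte is sent. A standalone sketch of the buffer assembly, with dump_aux_write() standing in for intel_dp_aux_native_write() and made-up lane settings.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>

#define DP_TRAINING_PATTERN_SET		0x102
#define DP_TRAINING_PATTERN_DISABLE	0x0
#define DP_TRAINING_PATTERN_MASK	0x3

static int dump_aux_write(uint16_t addr, const uint8_t *buf, int len)
{
	printf("aux write @%#x:", addr);
	for (int i = 0; i < len; i++)
		printf(" %02x", buf[i]);
	printf("\n");
	return len;		/* pretend every byte was acked */
}

static bool set_link_train(uint8_t dp_train_pat, const uint8_t *train_set, int lane_count)
{
	uint8_t buf[5];		/* pattern byte + up to 4 lane settings */
	int len;

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) == DP_TRAINING_PATTERN_DISABLE) {
		len = 1;	/* don't write DP_TRAINING_LANEx_SET on disable */
	} else {
		memcpy(buf + 1, train_set, lane_count);	/* LANEx_SET follow PATTERN_SET */
		len = lane_count + 1;
	}

	return dump_aux_write(DP_TRAINING_PATTERN_SET, buf, len) == len;
}

int main(void)
{
	uint8_t train_set[4] = { 0x02, 0x02, 0x02, 0x02 };	/* made-up drive levels */

	set_link_train(0x01, train_set, 4);			/* pattern 1 plus 4 lanes */
	set_link_train(DP_TRAINING_PATTERN_DISABLE, train_set, 4); /* just the pattern byte */
	return 0;
}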
2387 | 2389 | ||
2388 | static bool | 2390 | static bool |
2389 | intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP, | 2391 | intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP, |
2390 | uint8_t dp_train_pat) | 2392 | uint8_t dp_train_pat) |
2391 | { | 2393 | { |
2392 | memset(intel_dp->train_set, 0, 4); | 2394 | memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); |
2393 | intel_dp_set_signal_levels(intel_dp, DP); | 2395 | intel_dp_set_signal_levels(intel_dp, DP); |
2394 | return intel_dp_set_link_train(intel_dp, DP, dp_train_pat); | 2396 | return intel_dp_set_link_train(intel_dp, DP, dp_train_pat); |
2395 | } | 2397 | } |
2396 | 2398 | ||
2397 | static bool | 2399 | static bool |
2398 | intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP, | 2400 | intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP, |
2399 | uint8_t link_status[DP_LINK_STATUS_SIZE]) | 2401 | const uint8_t link_status[DP_LINK_STATUS_SIZE]) |
2400 | { | 2402 | { |
2401 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | 2403 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
2402 | struct drm_device *dev = intel_dig_port->base.base.dev; | 2404 | struct drm_device *dev = intel_dig_port->base.base.dev; |
@@ -2507,7 +2509,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
2507 | if (i == intel_dp->lane_count) { | 2509 | if (i == intel_dp->lane_count) { |
2508 | ++loop_tries; | 2510 | ++loop_tries; |
2509 | if (loop_tries == 5) { | 2511 | if (loop_tries == 5) { |
2510 | DRM_DEBUG_KMS("too many full retries, give up\n"); | 2512 | DRM_ERROR("too many full retries, give up\n"); |
2511 | break; | 2513 | break; |
2512 | } | 2514 | } |
2513 | intel_dp_reset_link_train(intel_dp, &DP, | 2515 | intel_dp_reset_link_train(intel_dp, &DP, |
@@ -2521,7 +2523,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
2521 | if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { | 2523 | if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { |
2522 | ++voltage_tries; | 2524 | ++voltage_tries; |
2523 | if (voltage_tries == 5) { | 2525 | if (voltage_tries == 5) { |
2524 | DRM_DEBUG_KMS("too many voltage retries, give up\n"); | 2526 | DRM_ERROR("too many voltage retries, give up\n"); |
2525 | break; | 2527 | break; |
2526 | } | 2528 | } |
2527 | } else | 2529 | } else |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index eaf0003ddfd9..e33f387d4185 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -309,6 +309,12 @@ struct intel_crtc_config { | |||
309 | bool double_wide; | 309 | bool double_wide; |
310 | }; | 310 | }; |
311 | 311 | ||
312 | struct intel_pipe_wm { | ||
313 | struct intel_wm_level wm[5]; | ||
314 | uint32_t linetime; | ||
315 | bool fbc_wm_enabled; | ||
316 | }; | ||
317 | |||
312 | struct intel_crtc { | 318 | struct intel_crtc { |
313 | struct drm_crtc base; | 319 | struct drm_crtc base; |
314 | enum pipe pipe; | 320 | enum pipe pipe; |
@@ -321,7 +327,7 @@ struct intel_crtc { | |||
321 | */ | 327 | */ |
322 | bool active; | 328 | bool active; |
323 | bool eld_vld; | 329 | bool eld_vld; |
324 | bool primary_disabled; /* is the crtc obscured by a plane? */ | 330 | bool primary_enabled; /* is the primary plane (partially) visible? */ |
325 | bool lowfreq_avail; | 331 | bool lowfreq_avail; |
326 | struct intel_overlay *overlay; | 332 | struct intel_overlay *overlay; |
327 | struct intel_unpin_work *unpin_work; | 333 | struct intel_unpin_work *unpin_work; |
@@ -349,6 +355,12 @@ struct intel_crtc { | |||
349 | /* Access to these should be protected by dev_priv->irq_lock. */ | 355 | /* Access to these should be protected by dev_priv->irq_lock. */ |
350 | bool cpu_fifo_underrun_disabled; | 356 | bool cpu_fifo_underrun_disabled; |
351 | bool pch_fifo_underrun_disabled; | 357 | bool pch_fifo_underrun_disabled; |
358 | |||
359 | /* per-pipe watermark state */ | ||
360 | struct { | ||
361 | /* watermarks currently being used */ | ||
362 | struct intel_pipe_wm active; | ||
363 | } wm; | ||
352 | }; | 364 | }; |
353 | 365 | ||
354 | struct intel_plane_wm_parameters { | 366 | struct intel_plane_wm_parameters { |
@@ -677,6 +689,8 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config, | |||
677 | int dotclock); | 689 | int dotclock); |
678 | bool intel_crtc_active(struct drm_crtc *crtc); | 690 | bool intel_crtc_active(struct drm_crtc *crtc); |
679 | void i915_disable_vga_mem(struct drm_device *dev); | 691 | void i915_disable_vga_mem(struct drm_device *dev); |
692 | void hsw_enable_ips(struct intel_crtc *crtc); | ||
693 | void hsw_disable_ips(struct intel_crtc *crtc); | ||
680 | 694 | ||
681 | 695 | ||
682 | /* intel_dp.c */ | 696 | /* intel_dp.c */ |
@@ -711,14 +725,36 @@ bool intel_dsi_init(struct drm_device *dev); | |||
711 | void intel_dvo_init(struct drm_device *dev); | 725 | void intel_dvo_init(struct drm_device *dev); |
712 | 726 | ||
713 | 727 | ||
714 | /* intel_fb.c */ | 728 | /* legacy fbdev emulation in intel_fbdev.c */ |
715 | int intel_fbdev_init(struct drm_device *dev); | 729 | #ifdef CONFIG_DRM_I915_FBDEV |
716 | void intel_fbdev_initial_config(struct drm_device *dev); | 730 | extern int intel_fbdev_init(struct drm_device *dev); |
717 | void intel_fbdev_fini(struct drm_device *dev); | 731 | extern void intel_fbdev_initial_config(struct drm_device *dev); |
718 | void intel_fbdev_set_suspend(struct drm_device *dev, int state); | 732 | extern void intel_fbdev_fini(struct drm_device *dev); |
719 | void intel_fb_output_poll_changed(struct drm_device *dev); | 733 | extern void intel_fbdev_set_suspend(struct drm_device *dev, int state); |
720 | void intel_fb_restore_mode(struct drm_device *dev); | 734 | extern void intel_fbdev_output_poll_changed(struct drm_device *dev); |
735 | extern void intel_fbdev_restore_mode(struct drm_device *dev); | ||
736 | #else | ||
737 | static inline int intel_fbdev_init(struct drm_device *dev) | ||
738 | { | ||
739 | return 0; | ||
740 | } | ||
741 | |||
742 | static inline void intel_fbdev_initial_config(struct drm_device *dev) | ||
743 | { | ||
744 | } | ||
745 | |||
746 | static inline void intel_fbdev_fini(struct drm_device *dev) | ||
747 | { | ||
748 | } | ||
749 | |||
750 | static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state) | ||
751 | { | ||
752 | } | ||
721 | 753 | ||
754 | static inline void intel_fbdev_restore_mode(struct drm_device *dev) | ||
755 | { | ||
756 | } | ||
757 | #endif | ||
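Annotation: with this hunk the fbdev entry points compile to empty static inlines when CONFIG_DRM_I915_FBDEV is not set, so their callers need no #ifdefs of their own. The same pattern in miniature, using hypothetical names (CONFIG_TOY_FEATURE, toy_feature_init) rather than the real option.

#include <stdio.h>

#ifdef CONFIG_TOY_FEATURE
extern int toy_feature_init(void);	/* real implementation built elsewhere */
#else
static inline int toy_feature_init(void)
{
	return 0;			/* feature compiled out: succeed silently */
}
#endif

int main(void)
{
	/* Callers stay #ifdef-free either way. */
	printf("init -> %d\n", toy_feature_init());
	return 0;
}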
722 | 758 | ||
723 | /* intel_hdmi.c */ | 759 | /* intel_hdmi.c */ |
724 | void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port); | 760 | void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port); |
@@ -799,8 +835,11 @@ void intel_enable_gt_powersave(struct drm_device *dev); | |||
799 | void intel_disable_gt_powersave(struct drm_device *dev); | 835 | void intel_disable_gt_powersave(struct drm_device *dev); |
800 | void ironlake_teardown_rc6(struct drm_device *dev); | 836 | void ironlake_teardown_rc6(struct drm_device *dev); |
801 | void gen6_update_ring_freq(struct drm_device *dev); | 837 | void gen6_update_ring_freq(struct drm_device *dev); |
838 | void gen6_rps_idle(struct drm_i915_private *dev_priv); | ||
839 | void gen6_rps_boost(struct drm_i915_private *dev_priv); | ||
802 | void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); | 840 | void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); |
803 | void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); | 841 | void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); |
842 | void ilk_wm_get_hw_state(struct drm_device *dev); | ||
804 | 843 | ||
805 | 844 | ||
806 | /* intel_sdvo.c */ | 845 | /* intel_sdvo.c */ |
@@ -809,7 +848,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); | |||
809 | 848 | ||
810 | /* intel_sprite.c */ | 849 | /* intel_sprite.c */ |
811 | int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); | 850 | int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); |
812 | void intel_flush_display_plane(struct drm_i915_private *dev_priv, | 851 | void intel_flush_primary_plane(struct drm_i915_private *dev_priv, |
813 | enum plane plane); | 852 | enum plane plane); |
814 | void intel_plane_restore(struct drm_plane *plane); | 853 | void intel_plane_restore(struct drm_plane *plane); |
815 | void intel_plane_disable(struct drm_plane *plane); | 854 | void intel_plane_disable(struct drm_plane *plane); |
@@ -822,7 +861,4 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data, | |||
822 | /* intel_tv.c */ | 861 | /* intel_tv.c */ |
823 | void intel_tv_init(struct drm_device *dev); | 862 | void intel_tv_init(struct drm_device *dev); |
824 | 863 | ||
825 | void gen6_rps_idle(struct drm_i915_private *dev_priv); | ||
826 | void gen6_rps_boost(struct drm_i915_private *dev_priv); | ||
827 | |||
828 | #endif /* __INTEL_DRV_H__ */ | 864 | #endif /* __INTEL_DRV_H__ */ |
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 9a2fdd2a7e34..d257b093ca68 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c | |||
@@ -350,7 +350,7 @@ static void intel_dsi_mode_set(struct intel_encoder *intel_encoder) | |||
350 | unsigned int bpp = intel_crtc->config.pipe_bpp; | 350 | unsigned int bpp = intel_crtc->config.pipe_bpp; |
351 | u32 val, tmp; | 351 | u32 val, tmp; |
352 | 352 | ||
353 | DRM_DEBUG_KMS("pipe %d\n", pipe); | 353 | DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); |
354 | 354 | ||
355 | /* Update the DSI PLL */ | 355 | /* Update the DSI PLL */ |
356 | vlv_enable_dsi_pll(intel_encoder); | 356 | vlv_enable_dsi_pll(intel_encoder); |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fbdev.c index d883b77b1b78..acc839569c3f 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
@@ -299,13 +299,13 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state) | |||
299 | 299 | ||
300 | MODULE_LICENSE("GPL and additional rights"); | 300 | MODULE_LICENSE("GPL and additional rights"); |
301 | 301 | ||
302 | void intel_fb_output_poll_changed(struct drm_device *dev) | 302 | void intel_fbdev_output_poll_changed(struct drm_device *dev) |
303 | { | 303 | { |
304 | struct drm_i915_private *dev_priv = dev->dev_private; | 304 | struct drm_i915_private *dev_priv = dev->dev_private; |
305 | drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); | 305 | drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); |
306 | } | 306 | } |
307 | 307 | ||
308 | void intel_fb_restore_mode(struct drm_device *dev) | 308 | void intel_fbdev_restore_mode(struct drm_device *dev) |
309 | { | 309 | { |
310 | int ret; | 310 | int ret; |
311 | struct drm_i915_private *dev_priv = dev->dev_private; | 311 | struct drm_i915_private *dev_priv = dev->dev_private; |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 4f4d346db8f0..51a8336dec2e 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -1074,7 +1074,7 @@ done: | |||
1074 | return 0; | 1074 | return 0; |
1075 | } | 1075 | } |
1076 | 1076 | ||
1077 | static void intel_hdmi_pre_enable(struct intel_encoder *encoder) | 1077 | static void vlv_hdmi_pre_enable(struct intel_encoder *encoder) |
1078 | { | 1078 | { |
1079 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); | 1079 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); |
1080 | struct drm_device *dev = encoder->base.dev; | 1080 | struct drm_device *dev = encoder->base.dev; |
@@ -1127,7 +1127,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder) | |||
1127 | vlv_wait_port_ready(dev_priv, port); | 1127 | vlv_wait_port_ready(dev_priv, port); |
1128 | } | 1128 | } |
1129 | 1129 | ||
1130 | static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder) | 1130 | static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) |
1131 | { | 1131 | { |
1132 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); | 1132 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); |
1133 | struct drm_device *dev = encoder->base.dev; | 1133 | struct drm_device *dev = encoder->base.dev; |
@@ -1163,7 +1163,7 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder) | |||
1163 | mutex_unlock(&dev_priv->dpio_lock); | 1163 | mutex_unlock(&dev_priv->dpio_lock); |
1164 | } | 1164 | } |
1165 | 1165 | ||
1166 | static void intel_hdmi_post_disable(struct intel_encoder *encoder) | 1166 | static void vlv_hdmi_post_disable(struct intel_encoder *encoder) |
1167 | { | 1167 | { |
1168 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); | 1168 | struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); |
1169 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | 1169 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; |
@@ -1313,10 +1313,10 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) | |||
1313 | intel_encoder->get_hw_state = intel_hdmi_get_hw_state; | 1313 | intel_encoder->get_hw_state = intel_hdmi_get_hw_state; |
1314 | intel_encoder->get_config = intel_hdmi_get_config; | 1314 | intel_encoder->get_config = intel_hdmi_get_config; |
1315 | if (IS_VALLEYVIEW(dev)) { | 1315 | if (IS_VALLEYVIEW(dev)) { |
1316 | intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable; | 1316 | intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable; |
1317 | intel_encoder->pre_enable = intel_hdmi_pre_enable; | 1317 | intel_encoder->pre_enable = vlv_hdmi_pre_enable; |
1318 | intel_encoder->enable = vlv_enable_hdmi; | 1318 | intel_encoder->enable = vlv_enable_hdmi; |
1319 | intel_encoder->post_disable = intel_hdmi_post_disable; | 1319 | intel_encoder->post_disable = vlv_hdmi_post_disable; |
1320 | } else { | 1320 | } else { |
1321 | intel_encoder->enable = intel_enable_hdmi; | 1321 | intel_encoder->enable = intel_enable_hdmi; |
1322 | } | 1322 | } |
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 2acf5cae20e4..b82050c96f3e 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -258,7 +258,9 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) | |||
258 | /* Driver sleep timeout in ms. */ | 258 | /* Driver sleep timeout in ms. */ |
259 | dslp = ioread32(&swsci->dslp); | 259 | dslp = ioread32(&swsci->dslp); |
260 | if (!dslp) { | 260 | if (!dslp) { |
261 | dslp = 2; | 261 | /* The spec says 2ms should be the default, but it's too small |
262 | * for some machines. */ | ||
263 | dslp = 50; | ||
262 | } else if (dslp > 500) { | 264 | } else if (dslp > 500) { |
263 | /* Hey bios, trust must be earned. */ | 265 | /* Hey bios, trust must be earned. */ |
264 | WARN_ONCE(1, "excessive driver sleep timeout (DSPL) %u\n", dslp); | 266 | WARN_ONCE(1, "excessive driver sleep timeout (DSPL) %u\n", dslp); |
@@ -405,6 +407,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) | |||
405 | if (bclp > 255) | 407 | if (bclp > 255) |
406 | return ASLC_BACKLIGHT_FAILED; | 408 | return ASLC_BACKLIGHT_FAILED; |
407 | 409 | ||
410 | DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp); | ||
408 | intel_panel_set_backlight(dev, bclp, 255); | 411 | intel_panel_set_backlight(dev, bclp, 255); |
409 | iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); | 412 | iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); |
410 | 413 | ||
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index c81020923ee4..09b2994c9b37 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -574,6 +574,8 @@ void intel_panel_enable_backlight(struct drm_device *dev, | |||
574 | intel_pipe_to_cpu_transcoder(dev_priv, pipe); | 574 | intel_pipe_to_cpu_transcoder(dev_priv, pipe); |
575 | unsigned long flags; | 575 | unsigned long flags; |
576 | 576 | ||
577 | DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); | ||
578 | |||
577 | spin_lock_irqsave(&dev_priv->backlight.lock, flags); | 579 | spin_lock_irqsave(&dev_priv->backlight.lock, flags); |
578 | 580 | ||
579 | if (dev_priv->backlight.level == 0) { | 581 | if (dev_priv->backlight.level == 0) { |
@@ -680,6 +682,8 @@ intel_panel_detect(struct drm_device *dev) | |||
680 | static int intel_panel_update_status(struct backlight_device *bd) | 682 | static int intel_panel_update_status(struct backlight_device *bd) |
681 | { | 683 | { |
682 | struct drm_device *dev = bl_get_data(bd); | 684 | struct drm_device *dev = bl_get_data(bd); |
685 | DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n", | ||
686 | bd->props.brightness, bd->props.max_brightness); | ||
683 | intel_panel_set_backlight(dev, bd->props.brightness, | 687 | intel_panel_set_backlight(dev, bd->props.brightness, |
684 | bd->props.max_brightness); | 688 | bd->props.max_brightness); |
685 | return 0; | 689 | return 0; |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 008ec0bb017f..8064ff927bcc 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -475,7 +475,7 @@ void intel_update_fbc(struct drm_device *dev) | |||
475 | */ | 475 | */ |
476 | list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { | 476 | list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { |
477 | if (intel_crtc_active(tmp_crtc) && | 477 | if (intel_crtc_active(tmp_crtc) && |
478 | !to_intel_crtc(tmp_crtc)->primary_disabled) { | 478 | to_intel_crtc(tmp_crtc)->primary_enabled) { |
479 | if (crtc) { | 479 | if (crtc) { |
480 | if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES)) | 480 | if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES)) |
481 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); | 481 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); |
@@ -2200,20 +2200,11 @@ struct hsw_wm_maximums { | |||
2200 | uint16_t fbc; | 2200 | uint16_t fbc; |
2201 | }; | 2201 | }; |
2202 | 2202 | ||
2203 | struct hsw_wm_values { | ||
2204 | uint32_t wm_pipe[3]; | ||
2205 | uint32_t wm_lp[3]; | ||
2206 | uint32_t wm_lp_spr[3]; | ||
2207 | uint32_t wm_linetime[3]; | ||
2208 | bool enable_fbc_wm; | ||
2209 | }; | ||
2210 | |||
2211 | /* used in computing the new watermarks state */ | 2203 | /* used in computing the new watermarks state */ |
2212 | struct intel_wm_config { | 2204 | struct intel_wm_config { |
2213 | unsigned int num_pipes_active; | 2205 | unsigned int num_pipes_active; |
2214 | bool sprites_enabled; | 2206 | bool sprites_enabled; |
2215 | bool sprites_scaled; | 2207 | bool sprites_scaled; |
2216 | bool fbc_wm_enabled; | ||
2217 | }; | 2208 | }; |
2218 | 2209 | ||
2219 | /* | 2210 | /* |
@@ -2380,11 +2371,11 @@ static unsigned int ilk_fbc_wm_max(void) | |||
2380 | return 15; | 2371 | return 15; |
2381 | } | 2372 | } |
2382 | 2373 | ||
2383 | static void ilk_wm_max(struct drm_device *dev, | 2374 | static void ilk_compute_wm_maximums(struct drm_device *dev, |
2384 | int level, | 2375 | int level, |
2385 | const struct intel_wm_config *config, | 2376 | const struct intel_wm_config *config, |
2386 | enum intel_ddb_partitioning ddb_partitioning, | 2377 | enum intel_ddb_partitioning ddb_partitioning, |
2387 | struct hsw_wm_maximums *max) | 2378 | struct hsw_wm_maximums *max) |
2388 | { | 2379 | { |
2389 | max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); | 2380 | max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); |
2390 | max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); | 2381 | max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); |
@@ -2392,9 +2383,9 @@ static void ilk_wm_max(struct drm_device *dev, | |||
2392 | max->fbc = ilk_fbc_wm_max(); | 2383 | max->fbc = ilk_fbc_wm_max(); |
2393 | } | 2384 | } |
2394 | 2385 | ||
2395 | static bool ilk_check_wm(int level, | 2386 | static bool ilk_validate_wm_level(int level, |
2396 | const struct hsw_wm_maximums *max, | 2387 | const struct hsw_wm_maximums *max, |
2397 | struct intel_wm_level *result) | 2388 | struct intel_wm_level *result) |
2398 | { | 2389 | { |
2399 | bool ret; | 2390 | bool ret; |
2400 | 2391 | ||
@@ -2430,8 +2421,6 @@ static bool ilk_check_wm(int level, | |||
2430 | result->enable = true; | 2421 | result->enable = true; |
2431 | } | 2422 | } |
2432 | 2423 | ||
2433 | DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis"); | ||
2434 | |||
2435 | return ret; | 2424 | return ret; |
2436 | } | 2425 | } |
2437 | 2426 | ||
@@ -2458,53 +2447,6 @@ static void ilk_compute_wm_level(struct drm_i915_private *dev_priv, | |||
2458 | result->enable = true; | 2447 | result->enable = true; |
2459 | } | 2448 | } |
2460 | 2449 | ||
2461 | static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv, | ||
2462 | int level, const struct hsw_wm_maximums *max, | ||
2463 | const struct hsw_pipe_wm_parameters *params, | ||
2464 | struct intel_wm_level *result) | ||
2465 | { | ||
2466 | enum pipe pipe; | ||
2467 | struct intel_wm_level res[3]; | ||
2468 | |||
2469 | for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) | ||
2470 | ilk_compute_wm_level(dev_priv, level, ¶ms[pipe], &res[pipe]); | ||
2471 | |||
2472 | result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val); | ||
2473 | result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val); | ||
2474 | result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val); | ||
2475 | result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val); | ||
2476 | result->enable = true; | ||
2477 | |||
2478 | return ilk_check_wm(level, max, result); | ||
2479 | } | ||
2480 | |||
2481 | |||
2482 | static uint32_t hsw_compute_wm_pipe(struct drm_device *dev, | ||
2483 | const struct hsw_pipe_wm_parameters *params) | ||
2484 | { | ||
2485 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2486 | struct intel_wm_config config = { | ||
2487 | .num_pipes_active = 1, | ||
2488 | .sprites_enabled = params->spr.enabled, | ||
2489 | .sprites_scaled = params->spr.scaled, | ||
2490 | }; | ||
2491 | struct hsw_wm_maximums max; | ||
2492 | struct intel_wm_level res; | ||
2493 | |||
2494 | if (!params->active) | ||
2495 | return 0; | ||
2496 | |||
2497 | ilk_wm_max(dev, 0, &config, INTEL_DDB_PART_1_2, &max); | ||
2498 | |||
2499 | ilk_compute_wm_level(dev_priv, 0, params, &res); | ||
2500 | |||
2501 | ilk_check_wm(0, &max, &res); | ||
2502 | |||
2503 | return (res.pri_val << WM0_PIPE_PLANE_SHIFT) | | ||
2504 | (res.spr_val << WM0_PIPE_SPRITE_SHIFT) | | ||
2505 | res.cur_val; | ||
2506 | } | ||
2507 | |||
2508 | static uint32_t | 2450 | static uint32_t |
2509 | hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc) | 2451 | hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc) |
2510 | { | 2452 | { |
@@ -2631,29 +2573,17 @@ static void intel_setup_wm_latency(struct drm_device *dev) | |||
2631 | intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); | 2573 | intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); |
2632 | } | 2574 | } |
2633 | 2575 | ||
2634 | static void hsw_compute_wm_parameters(struct drm_device *dev, | 2576 | static void hsw_compute_wm_parameters(struct drm_crtc *crtc, |
2635 | struct hsw_pipe_wm_parameters *params, | 2577 | struct hsw_pipe_wm_parameters *p, |
2636 | struct hsw_wm_maximums *lp_max_1_2, | 2578 | struct intel_wm_config *config) |
2637 | struct hsw_wm_maximums *lp_max_5_6) | ||
2638 | { | 2579 | { |
2639 | struct drm_crtc *crtc; | 2580 | struct drm_device *dev = crtc->dev; |
2581 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2582 | enum pipe pipe = intel_crtc->pipe; | ||
2640 | struct drm_plane *plane; | 2583 | struct drm_plane *plane; |
2641 | enum pipe pipe; | ||
2642 | struct intel_wm_config config = {}; | ||
2643 | |||
2644 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
2645 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2646 | struct hsw_pipe_wm_parameters *p; | ||
2647 | |||
2648 | pipe = intel_crtc->pipe; | ||
2649 | p = ¶ms[pipe]; | ||
2650 | |||
2651 | p->active = intel_crtc_active(crtc); | ||
2652 | if (!p->active) | ||
2653 | continue; | ||
2654 | |||
2655 | config.num_pipes_active++; | ||
2656 | 2584 | ||
2585 | p->active = intel_crtc_active(crtc); | ||
2586 | if (p->active) { | ||
2657 | p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal; | 2587 | p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal; |
2658 | p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); | 2588 | p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); |
2659 | p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8; | 2589 | p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8; |
@@ -2665,66 +2595,132 @@ static void hsw_compute_wm_parameters(struct drm_device *dev, | |||
2665 | p->cur.enabled = true; | 2595 | p->cur.enabled = true; |
2666 | } | 2596 | } |
2667 | 2597 | ||
2598 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) | ||
2599 | config->num_pipes_active += intel_crtc_active(crtc); | ||
2600 | |||
2668 | list_for_each_entry(plane, &dev->mode_config.plane_list, head) { | 2601 | list_for_each_entry(plane, &dev->mode_config.plane_list, head) { |
2669 | struct intel_plane *intel_plane = to_intel_plane(plane); | 2602 | struct intel_plane *intel_plane = to_intel_plane(plane); |
2670 | struct hsw_pipe_wm_parameters *p; | ||
2671 | |||
2672 | pipe = intel_plane->pipe; | ||
2673 | p = ¶ms[pipe]; | ||
2674 | 2603 | ||
2675 | p->spr = intel_plane->wm; | 2604 | if (intel_plane->pipe == pipe) |
2605 | p->spr = intel_plane->wm; | ||
2676 | 2606 | ||
2677 | config.sprites_enabled |= p->spr.enabled; | 2607 | config->sprites_enabled |= intel_plane->wm.enabled; |
2678 | config.sprites_scaled |= p->spr.scaled; | 2608 | config->sprites_scaled |= intel_plane->wm.scaled; |
2679 | } | 2609 | } |
2610 | } | ||
2680 | 2611 | ||
2681 | ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2); | 2612 | /* Compute new watermarks for the pipe */ |
2613 | static bool intel_compute_pipe_wm(struct drm_crtc *crtc, | ||
2614 | const struct hsw_pipe_wm_parameters *params, | ||
2615 | struct intel_pipe_wm *pipe_wm) | ||
2616 | { | ||
2617 | struct drm_device *dev = crtc->dev; | ||
2618 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2619 | int level, max_level = ilk_wm_max_level(dev); | ||
2620 | /* LP0 watermark maximums depend on this pipe alone */ | ||
2621 | struct intel_wm_config config = { | ||
2622 | .num_pipes_active = 1, | ||
2623 | .sprites_enabled = params->spr.enabled, | ||
2624 | .sprites_scaled = params->spr.scaled, | ||
2625 | }; | ||
2626 | struct hsw_wm_maximums max; | ||
2682 | 2627 | ||
2683 | /* 5/6 split only in single pipe config on IVB+ */ | 2628 | /* LP0 watermarks always use 1/2 DDB partitioning */ |
2684 | if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1) | 2629 | ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); |
2685 | ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6); | 2630 | |
2686 | else | 2631 | for (level = 0; level <= max_level; level++) |
2687 | *lp_max_5_6 = *lp_max_1_2; | 2632 | ilk_compute_wm_level(dev_priv, level, params, |
2633 | &pipe_wm->wm[level]); | ||
2634 | |||
2635 | pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc); | ||
2636 | |||
2637 | /* At least LP0 must be valid */ | ||
2638 | return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]); | ||
2688 | } | 2639 | } |
2689 | 2640 | ||
2690 | static void hsw_compute_wm_results(struct drm_device *dev, | 2641 | /* |
2691 | const struct hsw_pipe_wm_parameters *params, | 2642 | * Merge the watermarks from all active pipes for a specific level. |
2692 | const struct hsw_wm_maximums *lp_maximums, | 2643 | */ |
2693 | struct hsw_wm_values *results) | 2644 | static void ilk_merge_wm_level(struct drm_device *dev, |
2645 | int level, | ||
2646 | struct intel_wm_level *ret_wm) | ||
2694 | { | 2647 | { |
2695 | struct drm_i915_private *dev_priv = dev->dev_private; | 2648 | const struct intel_crtc *intel_crtc; |
2696 | struct drm_crtc *crtc; | ||
2697 | struct intel_wm_level lp_results[4] = {}; | ||
2698 | enum pipe pipe; | ||
2699 | int level, max_level, wm_lp; | ||
2700 | 2649 | ||
2701 | for (level = 1; level <= 4; level++) | 2650 | list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) { |
2702 | if (!hsw_compute_lp_wm(dev_priv, level, | 2651 | const struct intel_wm_level *wm = |
2703 | lp_maximums, params, | 2652 | &intel_crtc->wm.active.wm[level]; |
2704 | &lp_results[level - 1])) | 2653 | |
2705 | break; | 2654 | if (!wm->enable) |
2706 | max_level = level - 1; | 2655 | return; |
2707 | 2656 | ||
2708 | memset(results, 0, sizeof(*results)); | 2657 | ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val); |
2658 | ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val); | ||
2659 | ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val); | ||
2660 | ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val); | ||
2661 | } | ||
2662 | |||
2663 | ret_wm->enable = true; | ||
2664 | } | ||
2709 | 2665 | ||
2710 | /* The spec says it is preferred to disable FBC WMs instead of disabling | 2666 | /* |
2711 | * a WM level. */ | 2667 | * Merge all low power watermarks for all active pipes. |
2712 | results->enable_fbc_wm = true; | 2668 | */ |
2669 | static void ilk_wm_merge(struct drm_device *dev, | ||
2670 | const struct hsw_wm_maximums *max, | ||
2671 | struct intel_pipe_wm *merged) | ||
2672 | { | ||
2673 | int level, max_level = ilk_wm_max_level(dev); | ||
2674 | |||
2675 | merged->fbc_wm_enabled = true; | ||
2676 | |||
2677 | /* merge each WM1+ level */ | ||
2713 | for (level = 1; level <= max_level; level++) { | 2678 | for (level = 1; level <= max_level; level++) { |
2714 | if (lp_results[level - 1].fbc_val > lp_maximums->fbc) { | 2679 | struct intel_wm_level *wm = &merged->wm[level]; |
2715 | results->enable_fbc_wm = false; | 2680 | |
2716 | lp_results[level - 1].fbc_val = 0; | 2681 | ilk_merge_wm_level(dev, level, wm); |
2682 | |||
2683 | if (!ilk_validate_wm_level(level, max, wm)) | ||
2684 | break; | ||
2685 | |||
2686 | /* | ||
2687 | * The spec says it is preferred to disable | ||
2688 | * FBC WMs instead of disabling a WM level. | ||
2689 | */ | ||
2690 | if (wm->fbc_val > max->fbc) { | ||
2691 | merged->fbc_wm_enabled = false; | ||
2692 | wm->fbc_val = 0; | ||
2717 | } | 2693 | } |
2718 | } | 2694 | } |
2695 | } | ||
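Annotation: ilk_wm_merge() folds the per-pipe watermarks into one set of LP1+ levels by taking the per-field maximum across active pipes, stops at the first level that fails validation, and, when the merged FBC value exceeds the limit, prefers dropping the FBC watermark over losing the whole level. A toy merge of a single level across two pipes; the field values are invented, the limit of 15 matches ilk_fbc_wm_max() in this file.

#include <stdio.h>
#include <stdbool.h>

struct wm_level { int pri, spr, cur, fbc; bool enable; };

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	struct wm_level pipe_a = { .pri = 40, .spr = 20, .cur = 8,  .fbc = 6,  .enable = true };
	struct wm_level pipe_b = { .pri = 56, .spr = 12, .cur = 10, .fbc = 18, .enable = true };
	struct wm_level merged = { 0 };
	int fbc_max = 15;
	bool fbc_wm_enabled = true;

	/* merge: worst case (maximum) of every field, as in ilk_merge_wm_level() */
	merged.pri = MAX(pipe_a.pri, pipe_b.pri);
	merged.spr = MAX(pipe_a.spr, pipe_b.spr);
	merged.cur = MAX(pipe_a.cur, pipe_b.cur);
	merged.fbc = MAX(pipe_a.fbc, pipe_b.fbc);
	merged.enable = true;

	/* the spec prefers disabling FBC watermarks over disabling a WM level */
	if (merged.fbc > fbc_max) {
		fbc_wm_enabled = false;
		merged.fbc = 0;
	}

	printf("merged pri=%d spr=%d cur=%d fbc=%d, fbc_wm_enabled=%d\n",
	       merged.pri, merged.spr, merged.cur, merged.fbc, fbc_wm_enabled);
	return 0;
}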
2719 | 2696 | ||
2697 | static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) | ||
2698 | { | ||
2699 | /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */ | ||
2700 | return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); | ||
2701 | } | ||
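Annotation: ilk_wm_lp_to_level() encodes the comment above it: when a WM4 level is available (pipe_wm->wm[4].enable), the three LP registers map to levels 1, 3, 4; otherwise they map to 1, 2, 3. A short worked check of the expression.

#include <stdio.h>
#include <stdbool.h>

static int wm_lp_to_level(int wm_lp, bool wm4_enabled)
{
	/* LP1,LP2,LP3 -> 1,3,4 when WM4 is available, else 1,2,3 */
	return wm_lp + (wm_lp >= 2 && wm4_enabled);
}

int main(void)
{
	for (int lp = 1; lp <= 3; lp++)
		printf("LP%d -> level %d (with WM4), level %d (without)\n",
		       lp, wm_lp_to_level(lp, true), wm_lp_to_level(lp, false));
	return 0;
}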
2702 | |||
2703 | static void hsw_compute_wm_results(struct drm_device *dev, | ||
2704 | const struct intel_pipe_wm *merged, | ||
2705 | enum intel_ddb_partitioning partitioning, | ||
2706 | struct hsw_wm_values *results) | ||
2707 | { | ||
2708 | struct intel_crtc *intel_crtc; | ||
2709 | int level, wm_lp; | ||
2710 | |||
2711 | results->enable_fbc_wm = merged->fbc_wm_enabled; | ||
2712 | results->partitioning = partitioning; | ||
2713 | |||
2714 | /* LP1+ register values */ | ||
2720 | for (wm_lp = 1; wm_lp <= 3; wm_lp++) { | 2715 | for (wm_lp = 1; wm_lp <= 3; wm_lp++) { |
2721 | const struct intel_wm_level *r; | 2716 | const struct intel_wm_level *r; |
2722 | 2717 | ||
2723 | level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp; | 2718 | level = ilk_wm_lp_to_level(wm_lp, merged); |
2724 | if (level > max_level) | 2719 | |
2720 | r = &merged->wm[level]; | ||
2721 | if (!r->enable) | ||
2725 | break; | 2722 | break; |
2726 | 2723 | ||
2727 | r = &lp_results[level - 1]; | ||
2728 | results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2, | 2724 | results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2, |
2729 | r->fbc_val, | 2725 | r->fbc_val, |
2730 | r->pri_val, | 2726 | r->pri_val, |
@@ -2732,116 +2728,158 @@ static void hsw_compute_wm_results(struct drm_device *dev, | |||
2732 | results->wm_lp_spr[wm_lp - 1] = r->spr_val; | 2728 | results->wm_lp_spr[wm_lp - 1] = r->spr_val; |
2733 | } | 2729 | } |
2734 | 2730 | ||
2735 | for_each_pipe(pipe) | 2731 | /* LP0 register values */ |
2736 | results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev, | 2732 | list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) { |
2737 | ¶ms[pipe]); | 2733 | enum pipe pipe = intel_crtc->pipe; |
2734 | const struct intel_wm_level *r = | ||
2735 | &intel_crtc->wm.active.wm[0]; | ||
2738 | 2736 | ||
2739 | for_each_pipe(pipe) { | 2737 | if (WARN_ON(!r->enable)) |
2740 | crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | 2738 | continue; |
2741 | results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc); | 2739 | |
2740 | results->wm_linetime[pipe] = intel_crtc->wm.active.linetime; | ||
2741 | |||
2742 | results->wm_pipe[pipe] = | ||
2743 | (r->pri_val << WM0_PIPE_PLANE_SHIFT) | | ||
2744 | (r->spr_val << WM0_PIPE_SPRITE_SHIFT) | | ||
2745 | r->cur_val; | ||
2742 | } | 2746 | } |
2743 | } | 2747 | } |
2744 | 2748 | ||
2745 | /* Find the result with the highest level enabled. Check for enable_fbc_wm in | 2749 | /* Find the result with the highest level enabled. Check for enable_fbc_wm in |
2746 | * case both are at the same level. Prefer r1 in case they're the same. */ | 2750 | * case both are at the same level. Prefer r1 in case they're the same. */ |
2747 | static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1, | 2751 | static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev, |
2748 | struct hsw_wm_values *r2) | 2752 | struct intel_pipe_wm *r1, |
2753 | struct intel_pipe_wm *r2) | ||
2749 | { | 2754 | { |
2750 | int i, val_r1 = 0, val_r2 = 0; | 2755 | int level, max_level = ilk_wm_max_level(dev); |
2756 | int level1 = 0, level2 = 0; | ||
2751 | 2757 | ||
2752 | for (i = 0; i < 3; i++) { | 2758 | for (level = 1; level <= max_level; level++) { |
2753 | if (r1->wm_lp[i] & WM3_LP_EN) | 2759 | if (r1->wm[level].enable) |
2754 | val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK; | 2760 | level1 = level; |
2755 | if (r2->wm_lp[i] & WM3_LP_EN) | 2761 | if (r2->wm[level].enable) |
2756 | val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK; | 2762 | level2 = level; |
2757 | } | 2763 | } |
2758 | 2764 | ||
2759 | if (val_r1 == val_r2) { | 2765 | if (level1 == level2) { |
2760 | if (r2->enable_fbc_wm && !r1->enable_fbc_wm) | 2766 | if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled) |
2761 | return r2; | 2767 | return r2; |
2762 | else | 2768 | else |
2763 | return r1; | 2769 | return r1; |
2764 | } else if (val_r1 > val_r2) { | 2770 | } else if (level1 > level2) { |
2765 | return r1; | 2771 | return r1; |
2766 | } else { | 2772 | } else { |
2767 | return r2; | 2773 | return r2; |
2768 | } | 2774 | } |
2769 | } | 2775 | } |
2770 | 2776 | ||
2777 | /* dirty bits used to track which watermarks need changes */ | ||
2778 | #define WM_DIRTY_PIPE(pipe) (1 << (pipe)) | ||
2779 | #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe))) | ||
2780 | #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp))) | ||
2781 | #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3)) | ||
2782 | #define WM_DIRTY_FBC (1 << 24) | ||
2783 | #define WM_DIRTY_DDB (1 << 25) | ||
2784 | |||
2785 | static unsigned int ilk_compute_wm_dirty(struct drm_device *dev, | ||
2786 | const struct hsw_wm_values *old, | ||
2787 | const struct hsw_wm_values *new) | ||
2788 | { | ||
2789 | unsigned int dirty = 0; | ||
2790 | enum pipe pipe; | ||
2791 | int wm_lp; | ||
2792 | |||
2793 | for_each_pipe(pipe) { | ||
2794 | if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) { | ||
2795 | dirty |= WM_DIRTY_LINETIME(pipe); | ||
2796 | /* Must disable LP1+ watermarks too */ | ||
2797 | dirty |= WM_DIRTY_LP_ALL; | ||
2798 | } | ||
2799 | |||
2800 | if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) { | ||
2801 | dirty |= WM_DIRTY_PIPE(pipe); | ||
2802 | /* Must disable LP1+ watermarks too */ | ||
2803 | dirty |= WM_DIRTY_LP_ALL; | ||
2804 | } | ||
2805 | } | ||
2806 | |||
2807 | if (old->enable_fbc_wm != new->enable_fbc_wm) { | ||
2808 | dirty |= WM_DIRTY_FBC; | ||
2809 | /* Must disable LP1+ watermarks too */ | ||
2810 | dirty |= WM_DIRTY_LP_ALL; | ||
2811 | } | ||
2812 | |||
2813 | if (old->partitioning != new->partitioning) { | ||
2814 | dirty |= WM_DIRTY_DDB; | ||
2815 | /* Must disable LP1+ watermarks too */ | ||
2816 | dirty |= WM_DIRTY_LP_ALL; | ||
2817 | } | ||
2818 | |||
2819 | /* LP1+ watermarks already deemed dirty, no need to continue */ | ||
2820 | if (dirty & WM_DIRTY_LP_ALL) | ||
2821 | return dirty; | ||
2822 | |||
2823 | /* Find the lowest numbered LP1+ watermark in need of an update... */ | ||
2824 | for (wm_lp = 1; wm_lp <= 3; wm_lp++) { | ||
2825 | if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] || | ||
2826 | old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1]) | ||
2827 | break; | ||
2828 | } | ||
2829 | |||
2830 | /* ...and mark it and all higher numbered LP1+ watermarks as dirty */ | ||
2831 | for (; wm_lp <= 3; wm_lp++) | ||
2832 | dirty |= WM_DIRTY_LP(wm_lp); | ||
2833 | |||
2834 | return dirty; | ||
2835 | } | ||
2836 | |||
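For orientation, here is a quick editorial sketch (not part of the patch) of where the WM_DIRTY_* bits defined above land. It assumes the usual i915 enum pipe numbering (PIPE_A..PIPE_C = 0..2) and the kernel's BUILD_BUG_ON macro, which has to sit inside a function body to compile:

/* Editorial sketch, not part of the patch: pin down the dirty-mask layout
 * implied by the WM_DIRTY_* macros above. */
static inline void ilk_wm_dirty_layout_check(void)
{
	BUILD_BUG_ON(WM_DIRTY_PIPE(PIPE_C)     != (1 << 2));  /* WM0 pipe bits:  0..2  */
	BUILD_BUG_ON(WM_DIRTY_LINETIME(PIPE_C) != (1 << 10)); /* linetime bits:  8..10 */
	BUILD_BUG_ON(WM_DIRTY_LP(3)            != (1 << 18)); /* LP1..LP3 bits: 16..18 */
	BUILD_BUG_ON(WM_DIRTY_FBC              != (1 << 24));
	BUILD_BUG_ON(WM_DIRTY_DDB              != (1 << 25));
}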
2771 | /* | 2837 | /* |
2772 | * The spec says we shouldn't write when we don't need, because every write | 2838 | * The spec says we shouldn't write when we don't need, because every write |
2773 | * causes WMs to be re-evaluated, expending some power. | 2839 | * causes WMs to be re-evaluated, expending some power. |
2774 | */ | 2840 | */ |
2775 | static void hsw_write_wm_values(struct drm_i915_private *dev_priv, | 2841 | static void hsw_write_wm_values(struct drm_i915_private *dev_priv, |
2776 | struct hsw_wm_values *results, | 2842 | struct hsw_wm_values *results) |
2777 | enum intel_ddb_partitioning partitioning) | ||
2778 | { | 2843 | { |
2779 | struct hsw_wm_values previous; | 2844 | struct hsw_wm_values *previous = &dev_priv->wm.hw; |
2845 | unsigned int dirty; | ||
2780 | uint32_t val; | 2846 | uint32_t val; |
2781 | enum intel_ddb_partitioning prev_partitioning; | 2847 | |
2782 | bool prev_enable_fbc_wm; | 2848 | dirty = ilk_compute_wm_dirty(dev_priv->dev, previous, results); |
2783 | 2849 | if (!dirty) | |
2784 | previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK); | ||
2785 | previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK); | ||
2786 | previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB); | ||
2787 | previous.wm_lp[0] = I915_READ(WM1_LP_ILK); | ||
2788 | previous.wm_lp[1] = I915_READ(WM2_LP_ILK); | ||
2789 | previous.wm_lp[2] = I915_READ(WM3_LP_ILK); | ||
2790 | previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK); | ||
2791 | previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); | ||
2792 | previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); | ||
2793 | previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A)); | ||
2794 | previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B)); | ||
2795 | previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C)); | ||
2796 | |||
2797 | prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? | ||
2798 | INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; | ||
2799 | |||
2800 | prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); | ||
2801 | |||
2802 | if (memcmp(results->wm_pipe, previous.wm_pipe, | ||
2803 | sizeof(results->wm_pipe)) == 0 && | ||
2804 | memcmp(results->wm_lp, previous.wm_lp, | ||
2805 | sizeof(results->wm_lp)) == 0 && | ||
2806 | memcmp(results->wm_lp_spr, previous.wm_lp_spr, | ||
2807 | sizeof(results->wm_lp_spr)) == 0 && | ||
2808 | memcmp(results->wm_linetime, previous.wm_linetime, | ||
2809 | sizeof(results->wm_linetime)) == 0 && | ||
2810 | partitioning == prev_partitioning && | ||
2811 | results->enable_fbc_wm == prev_enable_fbc_wm) | ||
2812 | return; | 2850 | return; |
2813 | 2851 | ||
2814 | if (previous.wm_lp[2] != 0) | 2852 | if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 0) |
2815 | I915_WRITE(WM3_LP_ILK, 0); | 2853 | I915_WRITE(WM3_LP_ILK, 0); |
2816 | if (previous.wm_lp[1] != 0) | 2854 | if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != 0) |
2817 | I915_WRITE(WM2_LP_ILK, 0); | 2855 | I915_WRITE(WM2_LP_ILK, 0); |
2818 | if (previous.wm_lp[0] != 0) | 2856 | if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != 0) |
2819 | I915_WRITE(WM1_LP_ILK, 0); | 2857 | I915_WRITE(WM1_LP_ILK, 0); |
2820 | 2858 | ||
2821 | if (previous.wm_pipe[0] != results->wm_pipe[0]) | 2859 | if (dirty & WM_DIRTY_PIPE(PIPE_A)) |
2822 | I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]); | 2860 | I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]); |
2823 | if (previous.wm_pipe[1] != results->wm_pipe[1]) | 2861 | if (dirty & WM_DIRTY_PIPE(PIPE_B)) |
2824 | I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]); | 2862 | I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]); |
2825 | if (previous.wm_pipe[2] != results->wm_pipe[2]) | 2863 | if (dirty & WM_DIRTY_PIPE(PIPE_C)) |
2826 | I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]); | 2864 | I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]); |
2827 | 2865 | ||
2828 | if (previous.wm_linetime[0] != results->wm_linetime[0]) | 2866 | if (dirty & WM_DIRTY_LINETIME(PIPE_A)) |
2829 | I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]); | 2867 | I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]); |
2830 | if (previous.wm_linetime[1] != results->wm_linetime[1]) | 2868 | if (dirty & WM_DIRTY_LINETIME(PIPE_B)) |
2831 | I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]); | 2869 | I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]); |
2832 | if (previous.wm_linetime[2] != results->wm_linetime[2]) | 2870 | if (dirty & WM_DIRTY_LINETIME(PIPE_C)) |
2833 | I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]); | 2871 | I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]); |
2834 | 2872 | ||
2835 | if (prev_partitioning != partitioning) { | 2873 | if (dirty & WM_DIRTY_DDB) { |
2836 | val = I915_READ(WM_MISC); | 2874 | val = I915_READ(WM_MISC); |
2837 | if (partitioning == INTEL_DDB_PART_1_2) | 2875 | if (results->partitioning == INTEL_DDB_PART_1_2) |
2838 | val &= ~WM_MISC_DATA_PARTITION_5_6; | 2876 | val &= ~WM_MISC_DATA_PARTITION_5_6; |
2839 | else | 2877 | else |
2840 | val |= WM_MISC_DATA_PARTITION_5_6; | 2878 | val |= WM_MISC_DATA_PARTITION_5_6; |
2841 | I915_WRITE(WM_MISC, val); | 2879 | I915_WRITE(WM_MISC, val); |
2842 | } | 2880 | } |
2843 | 2881 | ||
2844 | if (prev_enable_fbc_wm != results->enable_fbc_wm) { | 2882 | if (dirty & WM_DIRTY_FBC) { |
2845 | val = I915_READ(DISP_ARB_CTL); | 2883 | val = I915_READ(DISP_ARB_CTL); |
2846 | if (results->enable_fbc_wm) | 2884 | if (results->enable_fbc_wm) |
2847 | val &= ~DISP_FBC_WM_DIS; | 2885 | val &= ~DISP_FBC_WM_DIS; |
@@ -2850,46 +2888,65 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv, | |||
2850 | I915_WRITE(DISP_ARB_CTL, val); | 2888 | I915_WRITE(DISP_ARB_CTL, val); |
2851 | } | 2889 | } |
2852 | 2890 | ||
2853 | if (previous.wm_lp_spr[0] != results->wm_lp_spr[0]) | 2891 | if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0]) |
2854 | I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]); | 2892 | I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]); |
2855 | if (previous.wm_lp_spr[1] != results->wm_lp_spr[1]) | 2893 | if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) |
2856 | I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]); | 2894 | I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]); |
2857 | if (previous.wm_lp_spr[2] != results->wm_lp_spr[2]) | 2895 | if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) |
2858 | I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]); | 2896 | I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]); |
2859 | 2897 | ||
2860 | if (results->wm_lp[0] != 0) | 2898 | if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0) |
2861 | I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); | 2899 | I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); |
2862 | if (results->wm_lp[1] != 0) | 2900 | if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0) |
2863 | I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); | 2901 | I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); |
2864 | if (results->wm_lp[2] != 0) | 2902 | if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0) |
2865 | I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); | 2903 | I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); |
2904 | |||
2905 | dev_priv->wm.hw = *results; | ||
2866 | } | 2906 | } |
2867 | 2907 | ||
2868 | static void haswell_update_wm(struct drm_crtc *crtc) | 2908 | static void haswell_update_wm(struct drm_crtc *crtc) |
2869 | { | 2909 | { |
2910 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
2870 | struct drm_device *dev = crtc->dev; | 2911 | struct drm_device *dev = crtc->dev; |
2871 | struct drm_i915_private *dev_priv = dev->dev_private; | 2912 | struct drm_i915_private *dev_priv = dev->dev_private; |
2872 | struct hsw_wm_maximums lp_max_1_2, lp_max_5_6; | 2913 | struct hsw_wm_maximums max; |
2873 | struct hsw_pipe_wm_parameters params[3]; | 2914 | struct hsw_pipe_wm_parameters params = {}; |
2874 | struct hsw_wm_values results_1_2, results_5_6, *best_results; | 2915 | struct hsw_wm_values results = {}; |
2875 | enum intel_ddb_partitioning partitioning; | 2916 | enum intel_ddb_partitioning partitioning; |
2917 | struct intel_pipe_wm pipe_wm = {}; | ||
2918 | struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; | ||
2919 | struct intel_wm_config config = {}; | ||
2920 | |||
2921 | hsw_compute_wm_parameters(crtc, ¶ms, &config); | ||
2922 | |||
2923 | intel_compute_pipe_wm(crtc, ¶ms, &pipe_wm); | ||
2924 | |||
2925 | if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm))) | ||
2926 | return; | ||
2927 | |||
2928 | intel_crtc->wm.active = pipe_wm; | ||
2929 | |||
2930 | ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); | ||
2931 | ilk_wm_merge(dev, &max, &lp_wm_1_2); | ||
2876 | 2932 | ||
2877 | hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6); | 2933 | /* 5/6 split only in single pipe config on IVB+ */ |
2934 | if (INTEL_INFO(dev)->gen >= 7 && | ||
2935 | config.num_pipes_active == 1 && config.sprites_enabled) { | ||
2936 | ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); | ||
2937 | ilk_wm_merge(dev, &max, &lp_wm_5_6); | ||
2878 | 2938 | ||
2879 | hsw_compute_wm_results(dev, params, | 2939 | best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); |
2880 | &lp_max_1_2, &results_1_2); | ||
2881 | if (lp_max_1_2.pri != lp_max_5_6.pri) { | ||
2882 | hsw_compute_wm_results(dev, params, | ||
2883 | &lp_max_5_6, &results_5_6); | ||
2884 | best_results = hsw_find_best_result(&results_1_2, &results_5_6); | ||
2885 | } else { | 2940 | } else { |
2886 | best_results = &results_1_2; | 2941 | best_lp_wm = &lp_wm_1_2; |
2887 | } | 2942 | } |
2888 | 2943 | ||
2889 | partitioning = (best_results == &results_1_2) ? | 2944 | partitioning = (best_lp_wm == &lp_wm_1_2) ? |
2890 | INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; | 2945 | INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; |
2891 | 2946 | ||
2892 | hsw_write_wm_values(dev_priv, best_results, partitioning); | 2947 | hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results); |
2948 | |||
2949 | hsw_write_wm_values(dev_priv, &results); | ||
2893 | } | 2950 | } |
2894 | 2951 | ||
2895 | static void haswell_update_sprite_wm(struct drm_plane *plane, | 2952 | static void haswell_update_sprite_wm(struct drm_plane *plane, |
@@ -3069,6 +3126,74 @@ static void sandybridge_update_sprite_wm(struct drm_plane *plane, | |||
3069 | I915_WRITE(WM3S_LP_IVB, sprite_wm); | 3126 | I915_WRITE(WM3S_LP_IVB, sprite_wm); |
3070 | } | 3127 | } |
3071 | 3128 | ||
3129 | static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) | ||
3130 | { | ||
3131 | struct drm_device *dev = crtc->dev; | ||
3132 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3133 | struct hsw_wm_values *hw = &dev_priv->wm.hw; | ||
3134 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
3135 | struct intel_pipe_wm *active = &intel_crtc->wm.active; | ||
3136 | enum pipe pipe = intel_crtc->pipe; | ||
3137 | static const unsigned int wm0_pipe_reg[] = { | ||
3138 | [PIPE_A] = WM0_PIPEA_ILK, | ||
3139 | [PIPE_B] = WM0_PIPEB_ILK, | ||
3140 | [PIPE_C] = WM0_PIPEC_IVB, | ||
3141 | }; | ||
3142 | |||
3143 | hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]); | ||
3144 | hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); | ||
3145 | |||
3146 | if (intel_crtc_active(crtc)) { | ||
3147 | u32 tmp = hw->wm_pipe[pipe]; | ||
3148 | |||
3149 | /* | ||
3150 | * For active pipes LP0 watermark is marked as | ||
3151 | * enabled, and LP1+ watermaks as disabled since | ||
3152 | * we can't really reverse compute them in case | ||
3153 | * multiple pipes are active. | ||
3154 | */ | ||
3155 | active->wm[0].enable = true; | ||
3156 | active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT; | ||
3157 | active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT; | ||
3158 | active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK; | ||
3159 | active->linetime = hw->wm_linetime[pipe]; | ||
3160 | } else { | ||
3161 | int level, max_level = ilk_wm_max_level(dev); | ||
3162 | |||
3163 | /* | ||
3164 | * For inactive pipes, all watermark levels | ||
3165 | * should be marked as enabled but zeroed, | ||
3166 | * which is what we'd compute them to. | ||
3167 | */ | ||
3168 | for (level = 0; level <= max_level; level++) | ||
3169 | active->wm[level].enable = true; | ||
3170 | } | ||
3171 | } | ||
3172 | |||
3173 | void ilk_wm_get_hw_state(struct drm_device *dev) | ||
3174 | { | ||
3175 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
3176 | struct hsw_wm_values *hw = &dev_priv->wm.hw; | ||
3177 | struct drm_crtc *crtc; | ||
3178 | |||
3179 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) | ||
3180 | ilk_pipe_wm_get_hw_state(crtc); | ||
3181 | |||
3182 | hw->wm_lp[0] = I915_READ(WM1_LP_ILK); | ||
3183 | hw->wm_lp[1] = I915_READ(WM2_LP_ILK); | ||
3184 | hw->wm_lp[2] = I915_READ(WM3_LP_ILK); | ||
3185 | |||
3186 | hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK); | ||
3187 | hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); | ||
3188 | hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); | ||
3189 | |||
3190 | hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? | ||
3191 | INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; | ||
3192 | |||
3193 | hw->enable_fbc_wm = | ||
3194 | !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); | ||
3195 | } | ||
3196 | |||
3072 | /** | 3197 | /** |
3073 | * intel_update_watermarks - update FIFO watermark values based on current modes | 3198 | * intel_update_watermarks - update FIFO watermark values based on current modes |
3074 | * | 3199 | * |
@@ -3442,22 +3567,26 @@ void gen6_set_rps(struct drm_device *dev, u8 val) | |||
3442 | void gen6_rps_idle(struct drm_i915_private *dev_priv) | 3567 | void gen6_rps_idle(struct drm_i915_private *dev_priv) |
3443 | { | 3568 | { |
3444 | mutex_lock(&dev_priv->rps.hw_lock); | 3569 | mutex_lock(&dev_priv->rps.hw_lock); |
3445 | if (dev_priv->info->is_valleyview) | 3570 | if (dev_priv->rps.enabled) { |
3446 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay); | 3571 | if (dev_priv->info->is_valleyview) |
3447 | else | 3572 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay); |
3448 | gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay); | 3573 | else |
3449 | dev_priv->rps.last_adj = 0; | 3574 | gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay); |
3575 | dev_priv->rps.last_adj = 0; | ||
3576 | } | ||
3450 | mutex_unlock(&dev_priv->rps.hw_lock); | 3577 | mutex_unlock(&dev_priv->rps.hw_lock); |
3451 | } | 3578 | } |
3452 | 3579 | ||
3453 | void gen6_rps_boost(struct drm_i915_private *dev_priv) | 3580 | void gen6_rps_boost(struct drm_i915_private *dev_priv) |
3454 | { | 3581 | { |
3455 | mutex_lock(&dev_priv->rps.hw_lock); | 3582 | mutex_lock(&dev_priv->rps.hw_lock); |
3456 | if (dev_priv->info->is_valleyview) | 3583 | if (dev_priv->rps.enabled) { |
3457 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay); | 3584 | if (dev_priv->info->is_valleyview) |
3458 | else | 3585 | valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay); |
3459 | gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay); | 3586 | else |
3460 | dev_priv->rps.last_adj = 0; | 3587 | gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay); |
3588 | dev_priv->rps.last_adj = 0; | ||
3589 | } | ||
3461 | mutex_unlock(&dev_priv->rps.hw_lock); | 3590 | mutex_unlock(&dev_priv->rps.hw_lock); |
3462 | } | 3591 | } |
3463 | 3592 | ||
@@ -3740,16 +3869,21 @@ void gen6_update_ring_freq(struct drm_device *dev) | |||
3740 | unsigned int gpu_freq; | 3869 | unsigned int gpu_freq; |
3741 | unsigned int max_ia_freq, min_ring_freq; | 3870 | unsigned int max_ia_freq, min_ring_freq; |
3742 | int scaling_factor = 180; | 3871 | int scaling_factor = 180; |
3872 | struct cpufreq_policy *policy; | ||
3743 | 3873 | ||
3744 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 3874 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); |
3745 | 3875 | ||
3746 | max_ia_freq = cpufreq_quick_get_max(0); | 3876 | policy = cpufreq_cpu_get(0); |
3747 | /* | 3877 | if (policy) { |
3748 | * Default to measured freq if none found, PCU will ensure we don't go | 3878 | max_ia_freq = policy->cpuinfo.max_freq; |
3749 | * over | 3879 | cpufreq_cpu_put(policy); |
3750 | */ | 3880 | } else { |
3751 | if (!max_ia_freq) | 3881 | /* |
3882 | * Default to measured freq if none found, PCU will ensure we | ||
3883 | * don't go over | ||
3884 | */ | ||
3752 | max_ia_freq = tsc_khz; | 3885 | max_ia_freq = tsc_khz; |
3886 | } | ||
3753 | 3887 | ||
3754 | /* Convert from kHz to MHz */ | 3888 | /* Convert from kHz to MHz */ |
3755 | max_ia_freq /= 1000; | 3889 | max_ia_freq /= 1000; |
@@ -4711,6 +4845,7 @@ void intel_disable_gt_powersave(struct drm_device *dev) | |||
4711 | valleyview_disable_rps(dev); | 4845 | valleyview_disable_rps(dev); |
4712 | else | 4846 | else |
4713 | gen6_disable_rps(dev); | 4847 | gen6_disable_rps(dev); |
4848 | dev_priv->rps.enabled = false; | ||
4714 | mutex_unlock(&dev_priv->rps.hw_lock); | 4849 | mutex_unlock(&dev_priv->rps.hw_lock); |
4715 | } | 4850 | } |
4716 | } | 4851 | } |
@@ -4730,6 +4865,7 @@ static void intel_gen6_powersave_work(struct work_struct *work) | |||
4730 | gen6_enable_rps(dev); | 4865 | gen6_enable_rps(dev); |
4731 | gen6_update_ring_freq(dev); | 4866 | gen6_update_ring_freq(dev); |
4732 | } | 4867 | } |
4868 | dev_priv->rps.enabled = true; | ||
4733 | mutex_unlock(&dev_priv->rps.hw_lock); | 4869 | mutex_unlock(&dev_priv->rps.hw_lock); |
4734 | } | 4870 | } |
4735 | 4871 | ||
@@ -4773,7 +4909,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev) | |||
4773 | I915_WRITE(DSPCNTR(pipe), | 4909 | I915_WRITE(DSPCNTR(pipe), |
4774 | I915_READ(DSPCNTR(pipe)) | | 4910 | I915_READ(DSPCNTR(pipe)) | |
4775 | DISPPLANE_TRICKLE_FEED_DISABLE); | 4911 | DISPPLANE_TRICKLE_FEED_DISABLE); |
4776 | intel_flush_display_plane(dev_priv, pipe); | 4912 | intel_flush_primary_plane(dev_priv, pipe); |
4777 | } | 4913 | } |
4778 | } | 4914 | } |
4779 | 4915 | ||
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b67104aaade5..2dec134f75eb 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -395,8 +395,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) | |||
395 | int ret = 0; | 395 | int ret = 0; |
396 | u32 head; | 396 | u32 head; |
397 | 397 | ||
398 | if (HAS_FORCE_WAKE(dev)) | 398 | gen6_gt_force_wake_get(dev_priv); |
399 | gen6_gt_force_wake_get(dev_priv); | ||
400 | 399 | ||
401 | if (I915_NEED_GFX_HWS(dev)) | 400 | if (I915_NEED_GFX_HWS(dev)) |
402 | intel_ring_setup_status_page(ring); | 401 | intel_ring_setup_status_page(ring); |
@@ -469,8 +468,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) | |||
469 | memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); | 468 | memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); |
470 | 469 | ||
471 | out: | 470 | out: |
472 | if (HAS_FORCE_WAKE(dev)) | 471 | gen6_gt_force_wake_put(dev_priv); |
473 | gen6_gt_force_wake_put(dev_priv); | ||
474 | 472 | ||
475 | return ret; | 473 | return ret; |
476 | } | 474 | } |
@@ -1326,7 +1324,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) | |||
1326 | /* Disable the ring buffer. The ring must be idle at this point */ | 1324 | /* Disable the ring buffer. The ring must be idle at this point */ |
1327 | dev_priv = ring->dev->dev_private; | 1325 | dev_priv = ring->dev->dev_private; |
1328 | ret = intel_ring_idle(ring); | 1326 | ret = intel_ring_idle(ring); |
1329 | if (ret) | 1327 | if (ret && !i915_reset_in_progress(&dev_priv->gpu_error)) |
1330 | DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", | 1328 | DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", |
1331 | ring->name, ret); | 1329 | ring->name, ret); |
1332 | 1330 | ||
@@ -1337,6 +1335,8 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) | |||
1337 | i915_gem_object_unpin(ring->obj); | 1335 | i915_gem_object_unpin(ring->obj); |
1338 | drm_gem_object_unreference(&ring->obj->base); | 1336 | drm_gem_object_unreference(&ring->obj->base); |
1339 | ring->obj = NULL; | 1337 | ring->obj = NULL; |
1338 | ring->preallocated_lazy_request = NULL; | ||
1339 | ring->outstanding_lazy_seqno = 0; | ||
1340 | 1340 | ||
1341 | if (ring->cleanup) | 1341 | if (ring->cleanup) |
1342 | ring->cleanup(ring); | 1342 | ring->cleanup(ring); |
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index acd1cfe8b7dd..9944d8135e87 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c | |||
@@ -25,7 +25,10 @@ | |||
25 | #include "i915_drv.h" | 25 | #include "i915_drv.h" |
26 | #include "intel_drv.h" | 26 | #include "intel_drv.h" |
27 | 27 | ||
28 | /* IOSF sideband */ | 28 | /* |
29 | * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and | ||
30 | * VLV_VLV2_PUNIT_HAS_0.8.docx | ||
31 | */ | ||
29 | static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn, | 32 | static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn, |
30 | u32 port, u32 opcode, u32 addr, u32 *val) | 33 | u32 port, u32 opcode, u32 addr, u32 *val) |
31 | { | 34 | { |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index cae10bc746d0..8afaad6bcc48 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -521,13 +521,28 @@ intel_enable_primary(struct drm_crtc *crtc) | |||
521 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 521 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
522 | int reg = DSPCNTR(intel_crtc->plane); | 522 | int reg = DSPCNTR(intel_crtc->plane); |
523 | 523 | ||
524 | if (!intel_crtc->primary_disabled) | 524 | if (intel_crtc->primary_enabled) |
525 | return; | 525 | return; |
526 | 526 | ||
527 | intel_crtc->primary_disabled = false; | 527 | intel_crtc->primary_enabled = true; |
528 | intel_update_fbc(dev); | ||
529 | 528 | ||
530 | I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE); | 529 | I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE); |
530 | intel_flush_primary_plane(dev_priv, intel_crtc->plane); | ||
531 | |||
532 | /* | ||
533 | * FIXME IPS should be fine as long as one plane is | ||
534 | * enabled, but in practice it seems to have problems | ||
535 | * when going from primary only to sprite only and vice | ||
536 | * versa. | ||
537 | */ | ||
538 | if (intel_crtc->config.ips_enabled) { | ||
539 | intel_wait_for_vblank(dev, intel_crtc->pipe); | ||
540 | hsw_enable_ips(intel_crtc); | ||
541 | } | ||
542 | |||
543 | mutex_lock(&dev->struct_mutex); | ||
544 | intel_update_fbc(dev); | ||
545 | mutex_unlock(&dev->struct_mutex); | ||
531 | } | 546 | } |
532 | 547 | ||
533 | static void | 548 | static void |
@@ -538,13 +553,26 @@ intel_disable_primary(struct drm_crtc *crtc) | |||
538 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 553 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
539 | int reg = DSPCNTR(intel_crtc->plane); | 554 | int reg = DSPCNTR(intel_crtc->plane); |
540 | 555 | ||
541 | if (intel_crtc->primary_disabled) | 556 | if (!intel_crtc->primary_enabled) |
542 | return; | 557 | return; |
543 | 558 | ||
544 | I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE); | 559 | intel_crtc->primary_enabled = false; |
545 | 560 | ||
546 | intel_crtc->primary_disabled = true; | 561 | mutex_lock(&dev->struct_mutex); |
547 | intel_update_fbc(dev); | 562 | if (dev_priv->fbc.plane == intel_crtc->plane) |
563 | intel_disable_fbc(dev); | ||
564 | mutex_unlock(&dev->struct_mutex); | ||
565 | |||
566 | /* | ||
567 | * FIXME IPS should be fine as long as one plane is | ||
568 | * enabled, but in practice it seems to have problems | ||
569 | * when going from primary only to sprite only and vice | ||
570 | * versa. | ||
571 | */ | ||
572 | hsw_disable_ips(intel_crtc); | ||
573 | |||
574 | I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE); | ||
575 | intel_flush_primary_plane(dev_priv, intel_crtc->plane); | ||
548 | } | 576 | } |
549 | 577 | ||
550 | static int | 578 | static int |
@@ -623,15 +651,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
623 | uint32_t src_w, uint32_t src_h) | 651 | uint32_t src_w, uint32_t src_h) |
624 | { | 652 | { |
625 | struct drm_device *dev = plane->dev; | 653 | struct drm_device *dev = plane->dev; |
626 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
627 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 654 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
628 | struct intel_plane *intel_plane = to_intel_plane(plane); | 655 | struct intel_plane *intel_plane = to_intel_plane(plane); |
629 | struct intel_framebuffer *intel_fb; | 656 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); |
630 | struct drm_i915_gem_object *obj, *old_obj; | 657 | struct drm_i915_gem_object *obj = intel_fb->obj; |
631 | int pipe = intel_plane->pipe; | 658 | struct drm_i915_gem_object *old_obj = intel_plane->obj; |
632 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, | 659 | int ret; |
633 | pipe); | ||
634 | int ret = 0; | ||
635 | bool disable_primary = false; | 660 | bool disable_primary = false; |
636 | bool visible; | 661 | bool visible; |
637 | int hscale, vscale; | 662 | int hscale, vscale; |
@@ -652,29 +677,23 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
652 | .y2 = crtc_y + crtc_h, | 677 | .y2 = crtc_y + crtc_h, |
653 | }; | 678 | }; |
654 | const struct drm_rect clip = { | 679 | const struct drm_rect clip = { |
655 | .x2 = intel_crtc->config.pipe_src_w, | 680 | .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0, |
656 | .y2 = intel_crtc->config.pipe_src_h, | 681 | .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0, |
682 | }; | ||
683 | const struct { | ||
684 | int crtc_x, crtc_y; | ||
685 | unsigned int crtc_w, crtc_h; | ||
686 | uint32_t src_x, src_y, src_w, src_h; | ||
687 | } orig = { | ||
688 | .crtc_x = crtc_x, | ||
689 | .crtc_y = crtc_y, | ||
690 | .crtc_w = crtc_w, | ||
691 | .crtc_h = crtc_h, | ||
692 | .src_x = src_x, | ||
693 | .src_y = src_y, | ||
694 | .src_w = src_w, | ||
695 | .src_h = src_h, | ||
657 | }; | 696 | }; |
658 | |||
659 | intel_fb = to_intel_framebuffer(fb); | ||
660 | obj = intel_fb->obj; | ||
661 | |||
662 | old_obj = intel_plane->obj; | ||
663 | |||
664 | intel_plane->crtc_x = crtc_x; | ||
665 | intel_plane->crtc_y = crtc_y; | ||
666 | intel_plane->crtc_w = crtc_w; | ||
667 | intel_plane->crtc_h = crtc_h; | ||
668 | intel_plane->src_x = src_x; | ||
669 | intel_plane->src_y = src_y; | ||
670 | intel_plane->src_w = src_w; | ||
671 | intel_plane->src_h = src_h; | ||
672 | |||
673 | /* Pipe must be running... */ | ||
674 | if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) { | ||
675 | DRM_DEBUG_KMS("Pipe disabled\n"); | ||
676 | return -EINVAL; | ||
677 | } | ||
678 | 697 | ||
679 | /* Don't modify another pipe's plane */ | 698 | /* Don't modify another pipe's plane */ |
680 | if (intel_plane->pipe != intel_crtc->pipe) { | 699 | if (intel_plane->pipe != intel_crtc->pipe) { |
@@ -810,7 +829,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
810 | * we can disable the primary and save power. | 829 | * we can disable the primary and save power. |
811 | */ | 830 | */ |
812 | disable_primary = drm_rect_equals(&dst, &clip); | 831 | disable_primary = drm_rect_equals(&dst, &clip); |
813 | WARN_ON(disable_primary && !visible); | 832 | WARN_ON(disable_primary && !visible && intel_crtc->active); |
814 | 833 | ||
815 | mutex_lock(&dev->struct_mutex); | 834 | mutex_lock(&dev->struct_mutex); |
816 | 835 | ||
@@ -820,27 +839,40 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
820 | * the sprite planes only require 128KiB alignment and 32 PTE padding. | 839 | * the sprite planes only require 128KiB alignment and 32 PTE padding. |
821 | */ | 840 | */ |
822 | ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); | 841 | ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); |
823 | if (ret) | ||
824 | goto out_unlock; | ||
825 | 842 | ||
826 | intel_plane->obj = obj; | 843 | mutex_unlock(&dev->struct_mutex); |
827 | |||
828 | /* | ||
829 | * Be sure to re-enable the primary before the sprite is no longer | ||
830 | * covering it fully. | ||
831 | */ | ||
832 | if (!disable_primary) | ||
833 | intel_enable_primary(crtc); | ||
834 | 844 | ||
835 | if (visible) | 845 | if (ret) |
836 | intel_plane->update_plane(plane, crtc, fb, obj, | 846 | return ret; |
837 | crtc_x, crtc_y, crtc_w, crtc_h, | 847 | |
838 | src_x, src_y, src_w, src_h); | 848 | intel_plane->crtc_x = orig.crtc_x; |
839 | else | 849 | intel_plane->crtc_y = orig.crtc_y; |
840 | intel_plane->disable_plane(plane, crtc); | 850 | intel_plane->crtc_w = orig.crtc_w; |
851 | intel_plane->crtc_h = orig.crtc_h; | ||
852 | intel_plane->src_x = orig.src_x; | ||
853 | intel_plane->src_y = orig.src_y; | ||
854 | intel_plane->src_w = orig.src_w; | ||
855 | intel_plane->src_h = orig.src_h; | ||
856 | intel_plane->obj = obj; | ||
841 | 857 | ||
842 | if (disable_primary) | 858 | if (intel_crtc->active) { |
843 | intel_disable_primary(crtc); | 859 | /* |
860 | * Be sure to re-enable the primary before the sprite is no longer | ||
861 | * covering it fully. | ||
862 | */ | ||
863 | if (!disable_primary) | ||
864 | intel_enable_primary(crtc); | ||
865 | |||
866 | if (visible) | ||
867 | intel_plane->update_plane(plane, crtc, fb, obj, | ||
868 | crtc_x, crtc_y, crtc_w, crtc_h, | ||
869 | src_x, src_y, src_w, src_h); | ||
870 | else | ||
871 | intel_plane->disable_plane(plane, crtc); | ||
872 | |||
873 | if (disable_primary) | ||
874 | intel_disable_primary(crtc); | ||
875 | } | ||
844 | 876 | ||
845 | /* Unpin old obj after new one is active to avoid ugliness */ | 877 | /* Unpin old obj after new one is active to avoid ugliness */ |
846 | if (old_obj) { | 878 | if (old_obj) { |
@@ -850,17 +882,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
850 | * wait for vblank to avoid ugliness, we only need to | 882 | * wait for vblank to avoid ugliness, we only need to |
851 | * do the pin & ref bookkeeping. | 883 | * do the pin & ref bookkeeping. |
852 | */ | 884 | */ |
853 | if (old_obj != obj) { | 885 | if (old_obj != obj && intel_crtc->active) |
854 | mutex_unlock(&dev->struct_mutex); | 886 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
855 | intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); | 887 | |
856 | mutex_lock(&dev->struct_mutex); | 888 | mutex_lock(&dev->struct_mutex); |
857 | } | ||
858 | intel_unpin_fb_obj(old_obj); | 889 | intel_unpin_fb_obj(old_obj); |
890 | mutex_unlock(&dev->struct_mutex); | ||
859 | } | 891 | } |
860 | 892 | ||
861 | out_unlock: | 893 | return 0; |
862 | mutex_unlock(&dev->struct_mutex); | ||
863 | return ret; | ||
864 | } | 894 | } |
865 | 895 | ||
866 | static int | 896 | static int |
@@ -868,7 +898,7 @@ intel_disable_plane(struct drm_plane *plane) | |||
868 | { | 898 | { |
869 | struct drm_device *dev = plane->dev; | 899 | struct drm_device *dev = plane->dev; |
870 | struct intel_plane *intel_plane = to_intel_plane(plane); | 900 | struct intel_plane *intel_plane = to_intel_plane(plane); |
871 | int ret = 0; | 901 | struct intel_crtc *intel_crtc; |
872 | 902 | ||
873 | if (!plane->fb) | 903 | if (!plane->fb) |
874 | return 0; | 904 | return 0; |
@@ -876,21 +906,25 @@ intel_disable_plane(struct drm_plane *plane) | |||
876 | if (WARN_ON(!plane->crtc)) | 906 | if (WARN_ON(!plane->crtc)) |
877 | return -EINVAL; | 907 | return -EINVAL; |
878 | 908 | ||
879 | intel_enable_primary(plane->crtc); | 909 | intel_crtc = to_intel_crtc(plane->crtc); |
880 | intel_plane->disable_plane(plane, plane->crtc); | ||
881 | 910 | ||
882 | if (!intel_plane->obj) | 911 | if (intel_crtc->active) { |
883 | goto out; | 912 | intel_enable_primary(plane->crtc); |
913 | intel_plane->disable_plane(plane, plane->crtc); | ||
914 | } | ||
884 | 915 | ||
885 | intel_wait_for_vblank(dev, intel_plane->pipe); | 916 | if (intel_plane->obj) { |
917 | if (intel_crtc->active) | ||
918 | intel_wait_for_vblank(dev, intel_plane->pipe); | ||
886 | 919 | ||
887 | mutex_lock(&dev->struct_mutex); | 920 | mutex_lock(&dev->struct_mutex); |
888 | intel_unpin_fb_obj(intel_plane->obj); | 921 | intel_unpin_fb_obj(intel_plane->obj); |
889 | intel_plane->obj = NULL; | 922 | mutex_unlock(&dev->struct_mutex); |
890 | mutex_unlock(&dev->struct_mutex); | ||
891 | out: | ||
892 | 923 | ||
893 | return ret; | 924 | intel_plane->obj = NULL; |
925 | } | ||
926 | |||
927 | return 0; | ||
894 | } | 928 | } |
895 | 929 | ||
896 | static void intel_destroy_plane(struct drm_plane *plane) | 930 | static void intel_destroy_plane(struct drm_plane *plane) |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index d61aec23a523..18c406246a2d 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -1094,7 +1094,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder) | |||
1094 | unsigned int xsize, ysize; | 1094 | unsigned int xsize, ysize; |
1095 | /* Pipe must be off here */ | 1095 | /* Pipe must be off here */ |
1096 | I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); | 1096 | I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); |
1097 | intel_flush_display_plane(dev_priv, intel_crtc->plane); | 1097 | intel_flush_primary_plane(dev_priv, intel_crtc->plane); |
1098 | 1098 | ||
1099 | /* Wait for vblank for the disable to take effect */ | 1099 | /* Wait for vblank for the disable to take effect */ |
1100 | if (IS_GEN2(dev)) | 1100 | if (IS_GEN2(dev)) |
@@ -1123,7 +1123,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder) | |||
1123 | 1123 | ||
1124 | I915_WRITE(pipeconf_reg, pipeconf); | 1124 | I915_WRITE(pipeconf_reg, pipeconf); |
1125 | I915_WRITE(dspcntr_reg, dspcntr); | 1125 | I915_WRITE(dspcntr_reg, dspcntr); |
1126 | intel_flush_display_plane(dev_priv, intel_crtc->plane); | 1126 | intel_flush_primary_plane(dev_priv, intel_crtc->plane); |
1127 | } | 1127 | } |
1128 | 1128 | ||
1129 | j = 0; | 1129 | j = 0; |
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 288a3a654f06..f6fae35c568e 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
@@ -222,70 +222,19 @@ void intel_uncore_early_sanitize(struct drm_device *dev) | |||
222 | 222 | ||
223 | if (HAS_FPGA_DBG_UNCLAIMED(dev)) | 223 | if (HAS_FPGA_DBG_UNCLAIMED(dev)) |
224 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); | 224 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); |
225 | } | ||
226 | |||
227 | void intel_uncore_init(struct drm_device *dev) | ||
228 | { | ||
229 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
230 | |||
231 | INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work, | ||
232 | gen6_force_wake_work); | ||
233 | 225 | ||
234 | if (IS_VALLEYVIEW(dev)) { | 226 | if (IS_HASWELL(dev) && |
235 | dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get; | 227 | (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) { |
236 | dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put; | 228 | /* The docs do not explain exactly how the calculation can be |
237 | } else if (IS_HASWELL(dev)) { | 229 | * made. It is somewhat guessable, but for now, it's always |
238 | dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get; | 230 | * 128MB. |
239 | dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put; | 231 | * NB: We can't write IDICR yet because we do not have gt funcs |
240 | } else if (IS_IVYBRIDGE(dev)) { | 232 | * set up */ |
241 | u32 ecobus; | 233 | dev_priv->ellc_size = 128; |
242 | 234 | DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size); | |
243 | /* IVB configs may use multi-threaded forcewake */ | ||
244 | |||
245 | /* A small trick here - if the bios hasn't configured | ||
246 | * MT forcewake, and if the device is in RC6, then | ||
247 | * force_wake_mt_get will not wake the device and the | ||
248 | * ECOBUS read will return zero. Which will be | ||
249 | * (correctly) interpreted by the test below as MT | ||
250 | * forcewake being disabled. | ||
251 | */ | ||
252 | mutex_lock(&dev->struct_mutex); | ||
253 | __gen6_gt_force_wake_mt_get(dev_priv); | ||
254 | ecobus = __raw_i915_read32(dev_priv, ECOBUS); | ||
255 | __gen6_gt_force_wake_mt_put(dev_priv); | ||
256 | mutex_unlock(&dev->struct_mutex); | ||
257 | |||
258 | if (ecobus & FORCEWAKE_MT_ENABLE) { | ||
259 | dev_priv->uncore.funcs.force_wake_get = | ||
260 | __gen6_gt_force_wake_mt_get; | ||
261 | dev_priv->uncore.funcs.force_wake_put = | ||
262 | __gen6_gt_force_wake_mt_put; | ||
263 | } else { | ||
264 | DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); | ||
265 | DRM_INFO("when using vblank-synced partial screen updates.\n"); | ||
266 | dev_priv->uncore.funcs.force_wake_get = | ||
267 | __gen6_gt_force_wake_get; | ||
268 | dev_priv->uncore.funcs.force_wake_put = | ||
269 | __gen6_gt_force_wake_put; | ||
270 | } | ||
271 | } else if (IS_GEN6(dev)) { | ||
272 | dev_priv->uncore.funcs.force_wake_get = | ||
273 | __gen6_gt_force_wake_get; | ||
274 | dev_priv->uncore.funcs.force_wake_put = | ||
275 | __gen6_gt_force_wake_put; | ||
276 | } | 235 | } |
277 | } | 236 | } |
278 | 237 | ||
279 | void intel_uncore_fini(struct drm_device *dev) | ||
280 | { | ||
281 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
282 | |||
283 | flush_delayed_work(&dev_priv->uncore.force_wake_work); | ||
284 | |||
285 | /* Paranoia: make sure we have disabled everything before we exit. */ | ||
286 | intel_uncore_sanitize(dev); | ||
287 | } | ||
288 | |||
289 | static void intel_uncore_forcewake_reset(struct drm_device *dev) | 238 | static void intel_uncore_forcewake_reset(struct drm_device *dev) |
290 | { | 239 | { |
291 | struct drm_i915_private *dev_priv = dev->dev_private; | 240 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -333,6 +282,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) | |||
333 | { | 282 | { |
334 | unsigned long irqflags; | 283 | unsigned long irqflags; |
335 | 284 | ||
285 | if (!dev_priv->uncore.funcs.force_wake_get) | ||
286 | return; | ||
287 | |||
336 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 288 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
337 | if (dev_priv->uncore.forcewake_count++ == 0) | 289 | if (dev_priv->uncore.forcewake_count++ == 0) |
338 | dev_priv->uncore.funcs.force_wake_get(dev_priv); | 290 | dev_priv->uncore.funcs.force_wake_get(dev_priv); |
@@ -346,6 +298,9 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | |||
346 | { | 298 | { |
347 | unsigned long irqflags; | 299 | unsigned long irqflags; |
348 | 300 | ||
301 | if (!dev_priv->uncore.funcs.force_wake_put) | ||
302 | return; | ||
303 | |||
349 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 304 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
350 | if (--dev_priv->uncore.forcewake_count == 0) { | 305 | if (--dev_priv->uncore.forcewake_count == 0) { |
351 | dev_priv->uncore.forcewake_count++; | 306 | dev_priv->uncore.forcewake_count++; |
@@ -358,9 +313,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) | |||
358 | 313 | ||
359 | /* We give fast paths for the really cool registers */ | 314 | /* We give fast paths for the really cool registers */ |
360 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ | 315 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ |
361 | ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ | 316 | ((reg) < 0x40000 && (reg) != FORCEWAKE) |
362 | ((reg) < 0x40000) && \ | ||
363 | ((reg) != FORCEWAKE)) | ||
364 | 317 | ||
365 | static void | 318 | static void |
366 | ilk_dummy_write(struct drm_i915_private *dev_priv) | 319 | ilk_dummy_write(struct drm_i915_private *dev_priv) |
@@ -374,8 +327,7 @@ ilk_dummy_write(struct drm_i915_private *dev_priv) | |||
374 | static void | 327 | static void |
375 | hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) | 328 | hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) |
376 | { | 329 | { |
377 | if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && | 330 | if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { |
378 | (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { | ||
379 | DRM_ERROR("Unknown unclaimed register before writing to %x\n", | 331 | DRM_ERROR("Unknown unclaimed register before writing to %x\n", |
380 | reg); | 332 | reg); |
381 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); | 333 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); |
@@ -385,20 +337,43 @@ hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) | |||
385 | static void | 337 | static void |
386 | hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) | 338 | hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) |
387 | { | 339 | { |
388 | if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && | 340 | if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { |
389 | (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { | ||
390 | DRM_ERROR("Unclaimed write to %x\n", reg); | 341 | DRM_ERROR("Unclaimed write to %x\n", reg); |
391 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); | 342 | __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); |
392 | } | 343 | } |
393 | } | 344 | } |
394 | 345 | ||
395 | #define __i915_read(x) \ | 346 | #define REG_READ_HEADER(x) \ |
396 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \ | ||
397 | unsigned long irqflags; \ | 347 | unsigned long irqflags; \ |
398 | u##x val = 0; \ | 348 | u##x val = 0; \ |
399 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ | 349 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) |
400 | if (dev_priv->info->gen == 5) \ | 350 | |
401 | ilk_dummy_write(dev_priv); \ | 351 | #define REG_READ_FOOTER \ |
352 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ | ||
353 | trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ | ||
354 | return val | ||
355 | |||
356 | #define __gen4_read(x) \ | ||
357 | static u##x \ | ||
358 | gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ | ||
359 | REG_READ_HEADER(x); \ | ||
360 | val = __raw_i915_read##x(dev_priv, reg); \ | ||
361 | REG_READ_FOOTER; \ | ||
362 | } | ||
363 | |||
364 | #define __gen5_read(x) \ | ||
365 | static u##x \ | ||
366 | gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ | ||
367 | REG_READ_HEADER(x); \ | ||
368 | ilk_dummy_write(dev_priv); \ | ||
369 | val = __raw_i915_read##x(dev_priv, reg); \ | ||
370 | REG_READ_FOOTER; \ | ||
371 | } | ||
372 | |||
373 | #define __gen6_read(x) \ | ||
374 | static u##x \ | ||
375 | gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \ | ||
376 | REG_READ_HEADER(x); \ | ||
402 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | 377 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
403 | if (dev_priv->uncore.forcewake_count == 0) \ | 378 | if (dev_priv->uncore.forcewake_count == 0) \ |
404 | dev_priv->uncore.funcs.force_wake_get(dev_priv); \ | 379 | dev_priv->uncore.funcs.force_wake_get(dev_priv); \ |
@@ -408,28 +383,73 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \ | |||
408 | } else { \ | 383 | } else { \ |
409 | val = __raw_i915_read##x(dev_priv, reg); \ | 384 | val = __raw_i915_read##x(dev_priv, reg); \ |
410 | } \ | 385 | } \ |
386 | REG_READ_FOOTER; \ | ||
387 | } | ||
388 | |||
389 | __gen6_read(8) | ||
390 | __gen6_read(16) | ||
391 | __gen6_read(32) | ||
392 | __gen6_read(64) | ||
393 | __gen5_read(8) | ||
394 | __gen5_read(16) | ||
395 | __gen5_read(32) | ||
396 | __gen5_read(64) | ||
397 | __gen4_read(8) | ||
398 | __gen4_read(16) | ||
399 | __gen4_read(32) | ||
400 | __gen4_read(64) | ||
401 | |||
402 | #undef __gen6_read | ||
403 | #undef __gen5_read | ||
404 | #undef __gen4_read | ||
405 | #undef REG_READ_FOOTER | ||
406 | #undef REG_READ_HEADER | ||
407 | |||
408 | #define REG_WRITE_HEADER \ | ||
409 | unsigned long irqflags; \ | ||
410 | trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ | ||
411 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags) | ||
412 | |||
413 | #define __gen4_write(x) \ | ||
414 | static void \ | ||
415 | gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ | ||
416 | REG_WRITE_HEADER; \ | ||
417 | __raw_i915_write##x(dev_priv, reg, val); \ | ||
411 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ | 418 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ |
412 | trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ | ||
413 | return val; \ | ||
414 | } | 419 | } |
415 | 420 | ||
416 | __i915_read(8) | 421 | #define __gen5_write(x) \ |
417 | __i915_read(16) | 422 | static void \ |
418 | __i915_read(32) | 423 | gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ |
419 | __i915_read(64) | 424 | REG_WRITE_HEADER; \ |
420 | #undef __i915_read | 425 | ilk_dummy_write(dev_priv); \ |
426 | __raw_i915_write##x(dev_priv, reg, val); \ | ||
427 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ | ||
428 | } | ||
421 | 429 | ||
422 | #define __i915_write(x) \ | 430 | #define __gen6_write(x) \ |
423 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \ | 431 | static void \ |
424 | unsigned long irqflags; \ | 432 | gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ |
425 | u32 __fifo_ret = 0; \ | 433 | u32 __fifo_ret = 0; \ |
426 | trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ | 434 | REG_WRITE_HEADER; \ |
427 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ | 435 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
436 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ | ||
437 | } \ | ||
438 | __raw_i915_write##x(dev_priv, reg, val); \ | ||
439 | if (unlikely(__fifo_ret)) { \ | ||
440 | gen6_gt_check_fifodbg(dev_priv); \ | ||
441 | } \ | ||
442 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ | ||
443 | } | ||
444 | |||
445 | #define __hsw_write(x) \ | ||
446 | static void \ | ||
447 | hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \ | ||
448 | u32 __fifo_ret = 0; \ | ||
449 | REG_WRITE_HEADER; \ | ||
428 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | 450 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ |
429 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ | 451 | __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ |
430 | } \ | 452 | } \ |
431 | if (dev_priv->info->gen == 5) \ | ||
432 | ilk_dummy_write(dev_priv); \ | ||
433 | hsw_unclaimed_reg_clear(dev_priv, reg); \ | 453 | hsw_unclaimed_reg_clear(dev_priv, reg); \ |
434 | __raw_i915_write##x(dev_priv, reg, val); \ | 454 | __raw_i915_write##x(dev_priv, reg, val); \ |
435 | if (unlikely(__fifo_ret)) { \ | 455 | if (unlikely(__fifo_ret)) { \ |
@@ -438,11 +458,134 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool tr | |||
438 | hsw_unclaimed_reg_check(dev_priv, reg); \ | 458 | hsw_unclaimed_reg_check(dev_priv, reg); \ |
439 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ | 459 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ |
440 | } | 460 | } |
441 | __i915_write(8) | 461 | |
442 | __i915_write(16) | 462 | __hsw_write(8) |
443 | __i915_write(32) | 463 | __hsw_write(16) |
444 | __i915_write(64) | 464 | __hsw_write(32) |
445 | #undef __i915_write | 465 | __hsw_write(64) |
466 | __gen6_write(8) | ||
467 | __gen6_write(16) | ||
468 | __gen6_write(32) | ||
469 | __gen6_write(64) | ||
470 | __gen5_write(8) | ||
471 | __gen5_write(16) | ||
472 | __gen5_write(32) | ||
473 | __gen5_write(64) | ||
474 | __gen4_write(8) | ||
475 | __gen4_write(16) | ||
476 | __gen4_write(32) | ||
477 | __gen4_write(64) | ||
478 | |||
479 | #undef __hsw_write | ||
480 | #undef __gen6_write | ||
481 | #undef __gen5_write | ||
482 | #undef __gen4_write | ||
483 | #undef REG_WRITE_HEADER | ||
484 | |||
485 | void intel_uncore_init(struct drm_device *dev) | ||
486 | { | ||
487 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
488 | |||
489 | INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work, | ||
490 | gen6_force_wake_work); | ||
491 | |||
492 | if (IS_VALLEYVIEW(dev)) { | ||
493 | dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get; | ||
494 | dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put; | ||
495 | } else if (IS_HASWELL(dev)) { | ||
496 | dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get; | ||
497 | dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put; | ||
498 | } else if (IS_IVYBRIDGE(dev)) { | ||
499 | u32 ecobus; | ||
500 | |||
501 | /* IVB configs may use multi-threaded forcewake */ | ||
502 | |||
503 | /* A small trick here - if the bios hasn't configured | ||
504 | * MT forcewake, and if the device is in RC6, then | ||
505 | * force_wake_mt_get will not wake the device and the | ||
506 | * ECOBUS read will return zero. Which will be | ||
507 | * (correctly) interpreted by the test below as MT | ||
508 | * forcewake being disabled. | ||
509 | */ | ||
510 | mutex_lock(&dev->struct_mutex); | ||
511 | __gen6_gt_force_wake_mt_get(dev_priv); | ||
512 | ecobus = __raw_i915_read32(dev_priv, ECOBUS); | ||
513 | __gen6_gt_force_wake_mt_put(dev_priv); | ||
514 | mutex_unlock(&dev->struct_mutex); | ||
515 | |||
516 | if (ecobus & FORCEWAKE_MT_ENABLE) { | ||
517 | dev_priv->uncore.funcs.force_wake_get = | ||
518 | __gen6_gt_force_wake_mt_get; | ||
519 | dev_priv->uncore.funcs.force_wake_put = | ||
520 | __gen6_gt_force_wake_mt_put; | ||
521 | } else { | ||
522 | DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); | ||
523 | DRM_INFO("when using vblank-synced partial screen updates.\n"); | ||
524 | dev_priv->uncore.funcs.force_wake_get = | ||
525 | __gen6_gt_force_wake_get; | ||
526 | dev_priv->uncore.funcs.force_wake_put = | ||
527 | __gen6_gt_force_wake_put; | ||
528 | } | ||
529 | } else if (IS_GEN6(dev)) { | ||
530 | dev_priv->uncore.funcs.force_wake_get = | ||
531 | __gen6_gt_force_wake_get; | ||
532 | dev_priv->uncore.funcs.force_wake_put = | ||
533 | __gen6_gt_force_wake_put; | ||
534 | } | ||
535 | |||
536 | switch (INTEL_INFO(dev)->gen) { | ||
537 | case 7: | ||
538 | case 6: | ||
539 | if (IS_HASWELL(dev)) { | ||
540 | dev_priv->uncore.funcs.mmio_writeb = hsw_write8; | ||
541 | dev_priv->uncore.funcs.mmio_writew = hsw_write16; | ||
542 | dev_priv->uncore.funcs.mmio_writel = hsw_write32; | ||
543 | dev_priv->uncore.funcs.mmio_writeq = hsw_write64; | ||
544 | } else { | ||
545 | dev_priv->uncore.funcs.mmio_writeb = gen6_write8; | ||
546 | dev_priv->uncore.funcs.mmio_writew = gen6_write16; | ||
547 | dev_priv->uncore.funcs.mmio_writel = gen6_write32; | ||
548 | dev_priv->uncore.funcs.mmio_writeq = gen6_write64; | ||
549 | } | ||
550 | dev_priv->uncore.funcs.mmio_readb = gen6_read8; | ||
551 | dev_priv->uncore.funcs.mmio_readw = gen6_read16; | ||
552 | dev_priv->uncore.funcs.mmio_readl = gen6_read32; | ||
553 | dev_priv->uncore.funcs.mmio_readq = gen6_read64; | ||
554 | break; | ||
555 | case 5: | ||
556 | dev_priv->uncore.funcs.mmio_writeb = gen5_write8; | ||
557 | dev_priv->uncore.funcs.mmio_writew = gen5_write16; | ||
558 | dev_priv->uncore.funcs.mmio_writel = gen5_write32; | ||
559 | dev_priv->uncore.funcs.mmio_writeq = gen5_write64; | ||
560 | dev_priv->uncore.funcs.mmio_readb = gen5_read8; | ||
561 | dev_priv->uncore.funcs.mmio_readw = gen5_read16; | ||
562 | dev_priv->uncore.funcs.mmio_readl = gen5_read32; | ||
563 | dev_priv->uncore.funcs.mmio_readq = gen5_read64; | ||
564 | break; | ||
565 | case 4: | ||
566 | case 3: | ||
567 | case 2: | ||
568 | dev_priv->uncore.funcs.mmio_writeb = gen4_write8; | ||
569 | dev_priv->uncore.funcs.mmio_writew = gen4_write16; | ||
570 | dev_priv->uncore.funcs.mmio_writel = gen4_write32; | ||
571 | dev_priv->uncore.funcs.mmio_writeq = gen4_write64; | ||
572 | dev_priv->uncore.funcs.mmio_readb = gen4_read8; | ||
573 | dev_priv->uncore.funcs.mmio_readw = gen4_read16; | ||
574 | dev_priv->uncore.funcs.mmio_readl = gen4_read32; | ||
575 | dev_priv->uncore.funcs.mmio_readq = gen4_read64; | ||
576 | break; | ||
577 | } | ||
578 | } | ||
579 | |||
580 | void intel_uncore_fini(struct drm_device *dev) | ||
581 | { | ||
582 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
583 | |||
584 | flush_delayed_work(&dev_priv->uncore.force_wake_work); | ||
585 | |||
586 | /* Paranoia: make sure we have disabled everything before we exit. */ | ||
587 | intel_uncore_sanitize(dev); | ||
588 | } | ||
446 | 589 | ||
447 | static const struct register_whitelist { | 590 | static const struct register_whitelist { |
448 | uint64_t offset; | 591 | uint64_t offset; |
@@ -490,36 +633,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, | |||
490 | return 0; | 633 | return 0; |
491 | } | 634 | } |
492 | 635 | ||
493 | static int i8xx_do_reset(struct drm_device *dev) | ||
494 | { | ||
495 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
496 | |||
497 | if (IS_I85X(dev)) | ||
498 | return -ENODEV; | ||
499 | |||
500 | I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830); | ||
501 | POSTING_READ(D_STATE); | ||
502 | |||
503 | if (IS_I830(dev) || IS_845G(dev)) { | ||
504 | I915_WRITE(DEBUG_RESET_I830, | ||
505 | DEBUG_RESET_DISPLAY | | ||
506 | DEBUG_RESET_RENDER | | ||
507 | DEBUG_RESET_FULL); | ||
508 | POSTING_READ(DEBUG_RESET_I830); | ||
509 | msleep(1); | ||
510 | |||
511 | I915_WRITE(DEBUG_RESET_I830, 0); | ||
512 | POSTING_READ(DEBUG_RESET_I830); | ||
513 | } | ||
514 | |||
515 | msleep(1); | ||
516 | |||
517 | I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830); | ||
518 | POSTING_READ(D_STATE); | ||
519 | |||
520 | return 0; | ||
521 | } | ||
522 | |||
523 | static int i965_reset_complete(struct drm_device *dev) | 636 | static int i965_reset_complete(struct drm_device *dev) |
524 | { | 637 | { |
525 | u8 gdrst; | 638 | u8 gdrst; |
@@ -621,7 +734,6 @@ int intel_gpu_reset(struct drm_device *dev) | |||
621 | case 6: return gen6_do_reset(dev); | 734 | case 6: return gen6_do_reset(dev); |
622 | case 5: return ironlake_do_reset(dev); | 735 | case 5: return ironlake_do_reset(dev); |
623 | case 4: return i965_do_reset(dev); | 736 | case 4: return i965_do_reset(dev); |
624 | case 2: return i8xx_do_reset(dev); | ||
625 | default: return -ENODEV; | 737 | default: return -ENODEV; |
626 | } | 738 | } |
627 | } | 739 | } |
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig index b487cdec5ee7..3a1c5fbae54a 100644 --- a/drivers/gpu/drm/mgag200/Kconfig +++ b/drivers/gpu/drm/mgag200/Kconfig | |||
@@ -5,6 +5,7 @@ config DRM_MGAG200 | |||
5 | select FB_SYS_COPYAREA | 5 | select FB_SYS_COPYAREA |
6 | select FB_SYS_IMAGEBLIT | 6 | select FB_SYS_IMAGEBLIT |
7 | select DRM_KMS_HELPER | 7 | select DRM_KMS_HELPER |
8 | select DRM_KMS_FB_HELPER | ||
8 | select DRM_TTM | 9 | select DRM_TTM |
9 | help | 10 | help |
10 | This is a KMS driver for the MGA G200 server chips, it | 11 | This is a KMS driver for the MGA G200 server chips, it |
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index a06c19cc56f8..f39ab7554fc9 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig | |||
@@ -14,6 +14,7 @@ config DRM_MSM | |||
14 | config DRM_MSM_FBDEV | 14 | config DRM_MSM_FBDEV |
15 | bool "Enable legacy fbdev support for MSM modesetting driver" | 15 | bool "Enable legacy fbdev support for MSM modesetting driver" |
16 | depends on DRM_MSM | 16 | depends on DRM_MSM |
17 | select DRM_KMS_FB_HELPER | ||
17 | select FB_SYS_FILLRECT | 18 | select FB_SYS_FILLRECT |
18 | select FB_SYS_COPYAREA | 19 | select FB_SYS_COPYAREA |
19 | select FB_SYS_IMAGEBLIT | 20 | select FB_SYS_IMAGEBLIT |
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index ff80f12480ea..7cf787d697b1 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig | |||
@@ -3,6 +3,7 @@ config DRM_NOUVEAU | |||
3 | depends on DRM && PCI | 3 | depends on DRM && PCI |
4 | select FW_LOADER | 4 | select FW_LOADER |
5 | select DRM_KMS_HELPER | 5 | select DRM_KMS_HELPER |
6 | select DRM_KMS_FB_HELPER | ||
6 | select DRM_TTM | 7 | select DRM_TTM |
7 | select FB_CFB_FILLRECT | 8 | select FB_CFB_FILLRECT |
8 | select FB_CFB_COPYAREA | 9 | select FB_CFB_COPYAREA |
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig index 20c41e73d448..6c220cd3497a 100644 --- a/drivers/gpu/drm/omapdrm/Kconfig +++ b/drivers/gpu/drm/omapdrm/Kconfig | |||
@@ -5,6 +5,7 @@ config DRM_OMAP | |||
5 | depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM | 5 | depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM |
6 | depends on OMAP2_DSS | 6 | depends on OMAP2_DSS |
7 | select DRM_KMS_HELPER | 7 | select DRM_KMS_HELPER |
8 | select DRM_KMS_FB_HELPER | ||
8 | select FB_SYS_FILLRECT | 9 | select FB_SYS_FILLRECT |
9 | select FB_SYS_COPYAREA | 10 | select FB_SYS_COPYAREA |
10 | select FB_SYS_IMAGEBLIT | 11 | select FB_SYS_IMAGEBLIT |
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig index d6c12796023c..037d324bf58f 100644 --- a/drivers/gpu/drm/qxl/Kconfig +++ b/drivers/gpu/drm/qxl/Kconfig | |||
@@ -6,6 +6,7 @@ config DRM_QXL | |||
6 | select FB_SYS_IMAGEBLIT | 6 | select FB_SYS_IMAGEBLIT |
7 | select FB_DEFERRED_IO | 7 | select FB_DEFERRED_IO |
8 | select DRM_KMS_HELPER | 8 | select DRM_KMS_HELPER |
9 | select DRM_KMS_FB_HELPER | ||
9 | select DRM_TTM | 10 | select DRM_TTM |
10 | help | 11 | help |
11 | QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting. | 12 | QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting. |
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index c590cd9dca0b..d8e835ac2c5e 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig | |||
@@ -4,6 +4,7 @@ config DRM_RCAR_DU | |||
4 | select DRM_KMS_HELPER | 4 | select DRM_KMS_HELPER |
5 | select DRM_KMS_CMA_HELPER | 5 | select DRM_KMS_CMA_HELPER |
6 | select DRM_GEM_CMA_HELPER | 6 | select DRM_GEM_CMA_HELPER |
7 | select DRM_KMS_FB_HELPER | ||
7 | help | 8 | help |
8 | Choose this option if you have an R-Car chipset. | 9 | Choose this option if you have an R-Car chipset. |
9 | If M is selected the module will be called rcar-du-drm. | 10 | If M is selected the module will be called rcar-du-drm. |
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig index ca498d151a76..d1372862d871 100644 --- a/drivers/gpu/drm/shmobile/Kconfig +++ b/drivers/gpu/drm/shmobile/Kconfig | |||
@@ -2,6 +2,7 @@ config DRM_SHMOBILE | |||
2 | tristate "DRM Support for SH Mobile" | 2 | tristate "DRM Support for SH Mobile" |
3 | depends on DRM && (ARM || SUPERH) | 3 | depends on DRM && (ARM || SUPERH) |
4 | select DRM_KMS_HELPER | 4 | select DRM_KMS_HELPER |
5 | select DRM_KMS_FB_HELPER | ||
5 | select DRM_KMS_CMA_HELPER | 6 | select DRM_KMS_CMA_HELPER |
6 | select DRM_GEM_CMA_HELPER | 7 | select DRM_GEM_CMA_HELPER |
7 | help | 8 | help |
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig index 7a4d10106906..7c3ef79fcb37 100644 --- a/drivers/gpu/drm/tilcdc/Kconfig +++ b/drivers/gpu/drm/tilcdc/Kconfig | |||
@@ -2,6 +2,7 @@ config DRM_TILCDC | |||
2 | tristate "DRM Support for TI LCDC Display Controller" | 2 | tristate "DRM Support for TI LCDC Display Controller" |
3 | depends on DRM && OF && ARM | 3 | depends on DRM && OF && ARM |
4 | select DRM_KMS_HELPER | 4 | select DRM_KMS_HELPER |
5 | select DRM_KMS_FB_HELPER | ||
5 | select DRM_KMS_CMA_HELPER | 6 | select DRM_KMS_CMA_HELPER |
6 | select DRM_GEM_CMA_HELPER | 7 | select DRM_GEM_CMA_HELPER |
7 | select VIDEOMODE_HELPERS | 8 | select VIDEOMODE_HELPERS |
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig index 6222af19f456..f02528686cd5 100644 --- a/drivers/gpu/drm/udl/Kconfig +++ b/drivers/gpu/drm/udl/Kconfig | |||
@@ -8,6 +8,7 @@ config DRM_UDL | |||
8 | select FB_SYS_IMAGEBLIT | 8 | select FB_SYS_IMAGEBLIT |
9 | select FB_DEFERRED_IO | 9 | select FB_DEFERRED_IO |
10 | select DRM_KMS_HELPER | 10 | select DRM_KMS_HELPER |
11 | select DRM_KMS_FB_HELPER | ||
11 | help | 12 | help |
12 | This is a KMS driver for the USB displaylink video adapters. | 13 | This is a KMS driver for the USB displaylink video adapters. |
13 | Say M/Y to add support for these devices via drm/kms interfaces. | 14 | Say M/Y to add support for these devices via drm/kms interfaces. |
diff --git a/drivers/gpu/host1x/drm/Kconfig b/drivers/gpu/host1x/drm/Kconfig index 69853a4de40a..0f36ddd74e87 100644 --- a/drivers/gpu/host1x/drm/Kconfig +++ b/drivers/gpu/host1x/drm/Kconfig | |||
@@ -2,6 +2,7 @@ config DRM_TEGRA | |||
2 | bool "NVIDIA Tegra DRM" | 2 | bool "NVIDIA Tegra DRM" |
3 | depends on DRM | 3 | depends on DRM |
4 | select DRM_KMS_HELPER | 4 | select DRM_KMS_HELPER |
5 | select DRM_KMS_FB_HELPER | ||
5 | select FB_SYS_FILLRECT | 6 | select FB_SYS_FILLRECT |
6 | select FB_SYS_COPYAREA | 7 | select FB_SYS_COPYAREA |
7 | select FB_SYS_IMAGEBLIT | 8 | select FB_SYS_IMAGEBLIT |
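Every Kconfig hunk in this part of the series applies the same one-line change: a driver that already selects DRM_KMS_HELPER and registers legacy fbdev emulation now also selects the split-out DRM_KMS_FB_HELPER symbol. A minimal sketch of what such a driver entry looks like after the split is shown below; DRM_FOO, its prompt, and its help text are placeholders for illustration only and do not appear anywhere in this patch set:

config DRM_FOO
	tristate "Example KMS driver with fbdev emulation"
	depends on DRM
	select DRM_KMS_HELPER
	# Only drivers that actually provide fbdev emulation need the
	# extra select; pure KMS drivers keep DRM_KMS_HELPER alone.
	select DRM_KMS_FB_HELPER
	select FB_SYS_FILLRECT
	select FB_SYS_COPYAREA
	select FB_SYS_IMAGEBLIT
	help
	  Placeholder entry illustrating the DRM_KMS_HELPER /
	  DRM_KMS_FB_HELPER split; real drivers keep their existing
	  dependencies and only gain the additional select.

A driver that uses the drm_fb_helper_* functions but omits the extra select is liable to hit undefined-reference errors in configurations where nothing else enables DRM_KMS_FB_HELPER, which is why the series touches every fbdev-capable driver in one sweep.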