Diffstat (limited to 'drivers/gpu/drm')
362 files changed, 15393 insertions, 11165 deletions
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 7c88f12096c5..1fafc2f8e8f9 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -11,7 +11,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
 		drm_sysfs.o drm_hashtab.o drm_mm.o \
 		drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
 		drm_encoder_slave.o \
-		drm_trace_points.o drm_global.o drm_prime.o \
+		drm_trace_points.o drm_prime.o \
 		drm_rect.o drm_vma_manager.o drm_flip_work.o \
 		drm_modeset_lock.o drm_atomic.o drm_bridge.o \
 		drm_framebuffer.o drm_connector.o drm_blend.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 138cb787d27e..f76bcb9c45e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -53,7 +53,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
 	amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
 	amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
-	amdgpu_gmc.o amdgpu_xgmi.o
+	amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o
 
 # add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -105,6 +105,7 @@ amdgpu-y += \
 # add GFX block
 amdgpu-y += \
 	amdgpu_gfx.o \
+	amdgpu_rlc.o \
 	gfx_v8_0.o \
 	gfx_v9_0.o
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d0102cfc8efb..42f882c633ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -75,6 +75,7 @@
 #include "amdgpu_sdma.h"
 #include "amdgpu_dm.h"
 #include "amdgpu_virt.h"
+#include "amdgpu_csa.h"
 #include "amdgpu_gart.h"
 #include "amdgpu_debugfs.h"
 #include "amdgpu_job.h"
@@ -151,6 +152,7 @@ extern int amdgpu_compute_multipipe;
 extern int amdgpu_gpu_recovery;
 extern int amdgpu_emu_mode;
 extern uint amdgpu_smu_memory_pool_size;
+extern uint amdgpu_dc_feature_mask;
 extern struct amdgpu_mgpu_info mgpu_info;
 
 #ifdef CONFIG_DRM_AMDGPU_SI
@@ -432,7 +434,7 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
 	 * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
 	 */
 
-	/* sDMA engines reserved from 0xe0 -oxef */
+	/* sDMA engines reserved from 0xe0 -0xef */
 	AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xE0,
 	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xE1,
 	AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xE8,
@@ -830,7 +832,6 @@ struct amdgpu_device {
 	bool				need_dma32;
 	bool				need_swiotlb;
 	bool				accel_working;
-	struct work_struct		reset_work;
 	struct notifier_block		acpi_nb;
 	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
 	struct amdgpu_debugfs		debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index c31a8849e9f8..60f9a87e9c74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -144,7 +144,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 			  KGD_MAX_QUEUES);
 
 	/* remove the KIQ bit as well */
-	if (adev->gfx.kiq.ring.ready)
+	if (adev->gfx.kiq.ring.sched.ready)
 		clear_bit(amdgpu_gfx_queue_to_bit(adev,
 			  adev->gfx.kiq.ring.me - 1,
 			  adev->gfx.kiq.ring.pipe,
@@ -268,9 +268,9 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
 	amdgpu_device_gpu_recover(adev, NULL);
 }
 
-int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
-		  void **mem_obj, uint64_t *gpu_addr,
-		  void **cpu_ptr, bool mqd_gfx9)
+int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+				void **mem_obj, uint64_t *gpu_addr,
+				void **cpu_ptr, bool mqd_gfx9)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct amdgpu_bo *bo = NULL;
@@ -340,7 +340,7 @@ allocate_mem_reserve_bo_failed:
 	return r;
 }
 
-void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
+void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
 {
 	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
 
@@ -351,8 +351,8 @@ void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
 	amdgpu_bo_unref(&(bo));
 }
 
-void get_local_mem_info(struct kgd_dev *kgd,
-			struct kfd_local_mem_info *mem_info)
+void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+				      struct kfd_local_mem_info *mem_info)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
@@ -383,7 +383,7 @@ void get_local_mem_info(struct kgd_dev *kgd,
 	mem_info->mem_clk_max = 100;
 }
 
-uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
@@ -392,7 +392,7 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
 	return 0;
 }
 
-uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
@@ -405,7 +405,7 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
 	return 100;
 }
 
-void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
+void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 8e0d4f7196b4..bcf587b4ba98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -134,16 +134,16 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
 void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
 
 /* Shared API */
-int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
-		  void **mem_obj, uint64_t *gpu_addr,
-		  void **cpu_ptr, bool mqd_gfx9);
-void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
-void get_local_mem_info(struct kgd_dev *kgd,
-			struct kfd_local_mem_info *mem_info);
-uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
+int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+				void **mem_obj, uint64_t *gpu_addr,
+				void **cpu_ptr, bool mqd_gfx9);
+void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
+void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+				      struct kfd_local_mem_info *mem_info);
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
 
-uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
-void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
 uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
 uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 244d9834a381..72a357dae070 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -173,13 +173,6 @@ static int get_tile_config(struct kgd_dev *kgd,
 }
 
 static const struct kfd2kgd_calls kfd2kgd = {
-	.init_gtt_mem_allocation = alloc_gtt_mem,
-	.free_gtt_mem = free_gtt_mem,
-	.get_local_mem_info = get_local_mem_info,
-	.get_gpu_clock_counter = get_gpu_clock_counter,
-	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
-	.alloc_pasid = amdgpu_pasid_alloc,
-	.free_pasid = amdgpu_pasid_free,
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
 	.init_interrupts = kgd_init_interrupts,
@@ -200,28 +193,10 @@ static const struct kfd2kgd_calls kfd2kgd = {
 	.get_fw_version = get_fw_version,
 	.set_scratch_backing_va = set_scratch_backing_va,
 	.get_tile_config = get_tile_config,
-	.get_cu_info = get_cu_info,
-	.get_vram_usage = amdgpu_amdkfd_get_vram_usage,
-	.create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
-	.acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
-	.destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
-	.release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
-	.get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
-	.alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
-	.free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
-	.map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
-	.unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
-	.sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
-	.map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
-	.restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
 	.invalidate_tlbs = invalidate_tlbs,
 	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-	.submit_ib = amdgpu_amdkfd_submit_ib,
-	.get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
 	.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
-	.gpu_recover = amdgpu_amdkfd_gpu_reset,
-	.set_compute_idle = amdgpu_amdkfd_set_compute_idle
 };
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 9f149914ad6c..0e2a56b6a9b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -128,13 +128,6 @@ static int get_tile_config(struct kgd_dev *kgd,
 }
 
 static const struct kfd2kgd_calls kfd2kgd = {
-	.init_gtt_mem_allocation = alloc_gtt_mem,
-	.free_gtt_mem = free_gtt_mem,
-	.get_local_mem_info = get_local_mem_info,
-	.get_gpu_clock_counter = get_gpu_clock_counter,
-	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
-	.alloc_pasid = amdgpu_pasid_alloc,
-	.free_pasid = amdgpu_pasid_free,
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
 	.init_interrupts = kgd_init_interrupts,
@@ -157,27 +150,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
 	.get_fw_version = get_fw_version,
 	.set_scratch_backing_va = set_scratch_backing_va,
 	.get_tile_config = get_tile_config,
-	.get_cu_info = get_cu_info,
-	.get_vram_usage = amdgpu_amdkfd_get_vram_usage,
-	.create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
-	.acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
-	.destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
-	.release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
-	.get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
-	.alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
-	.free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
-	.map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
-	.unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
-	.sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
-	.map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
-	.restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
 	.invalidate_tlbs = invalidate_tlbs,
 	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-	.submit_ib = amdgpu_amdkfd_submit_ib,
-	.get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
-	.gpu_recover = amdgpu_amdkfd_gpu_reset,
-	.set_compute_idle = amdgpu_amdkfd_set_compute_idle
 };
 
 struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 42cb4c4e0929..03b604c96d94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -46,38 +46,9 @@
 #include "v9_structs.h"
 #include "soc15.h"
 #include "soc15d.h"
+#include "mmhub_v1_0.h"
+#include "gfxhub_v1_0.h"
 
-/* HACK: MMHUB and GC both have VM-related register with the same
- * names but different offsets. Define the MMHUB register we need here
- * with a prefix. A proper solution would be to move the functions
- * programming these registers into gfx_v9_0.c and mmhub_v1_0.c
- * respectively.
- */
-#define mmMMHUB_VM_INVALIDATE_ENG16_REQ				0x06f3
-#define mmMMHUB_VM_INVALIDATE_ENG16_REQ_BASE_IDX		0
-
-#define mmMMHUB_VM_INVALIDATE_ENG16_ACK				0x0705
-#define mmMMHUB_VM_INVALIDATE_ENG16_ACK_BASE_IDX		0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32		0x072b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX	0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32		0x072c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX	0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32		0x074b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX	0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32		0x074c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX	0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32		0x076b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX	0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32		0x076c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX	0
-
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32		0x0727
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX	0
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32		0x0728
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX	0
 
 #define V9_PIPE_PER_MEC		(4)
 #define V9_QUEUES_PER_PIPE_MEC	(8)
@@ -167,13 +138,6 @@ static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
 }
 
 static const struct kfd2kgd_calls kfd2kgd = {
-	.init_gtt_mem_allocation = alloc_gtt_mem,
-	.free_gtt_mem = free_gtt_mem,
-	.get_local_mem_info = get_local_mem_info,
-	.get_gpu_clock_counter = get_gpu_clock_counter,
-	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
-	.alloc_pasid = amdgpu_pasid_alloc,
-	.free_pasid = amdgpu_pasid_free,
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
 	.init_interrupts = kgd_init_interrupts,
@@ -196,26 +160,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
 	.get_fw_version = get_fw_version,
 	.set_scratch_backing_va = set_scratch_backing_va,
 	.get_tile_config = amdgpu_amdkfd_get_tile_config,
-	.get_cu_info = get_cu_info,
-	.get_vram_usage = amdgpu_amdkfd_get_vram_usage,
-	.create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
-	.acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
-	.destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
-	.release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
-	.get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
-	.alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
-	.free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
-	.map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
-	.unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
-	.sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
-	.map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
-	.restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
 	.invalidate_tlbs = invalidate_tlbs,
 	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
-	.submit_ib = amdgpu_amdkfd_submit_ib,
-	.gpu_recover = amdgpu_amdkfd_gpu_reset,
-	.set_compute_idle = amdgpu_amdkfd_set_compute_idle,
 	.get_hive_id = amdgpu_amdkfd_get_hive_id,
 };
 
@@ -785,15 +732,6 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
 static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-	uint32_t req = (1 << vmid) |
-		(0 << VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT) | /* legacy */
-		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK |
-		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK |
-		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK |
-		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK |
-		VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK;
-
-	mutex_lock(&adev->srbm_mutex);
 
 	/* Use legacy mode tlb invalidation.
 	 *
@@ -810,34 +748,7 @@ static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
 	 * TODO 2: support range-based invalidation, requires kfg2kgd
 	 * interface change
 	 */
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_LO32),
-		0xffffffff);
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_HI32),
-		0x0000001f);
-
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-		mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32),
-		0xffffffff);
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0,
-		mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32),
-		0x0000001f);
-
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_REQ), req);
-
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_INVALIDATE_ENG16_REQ),
-		req);
-
-	while (!(RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ACK)) &
-		 (1 << vmid)))
-		cpu_relax();
-
-	while (!(RREG32(SOC15_REG_OFFSET(MMHUB, 0,
-		mmMMHUB_VM_INVALIDATE_ENG16_ACK)) &
-		(1 << vmid)))
-		cpu_relax();
-
-	mutex_unlock(&adev->srbm_mutex);
-
+	amdgpu_gmc_flush_gpu_tlb(adev, vmid, 0);
 }
 
 static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
@@ -876,7 +787,7 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
 	if (adev->in_gpu_reset)
 		return -EIO;
 
-	if (ring->ready)
+	if (ring->sched.ready)
 		return invalidate_tlbs_with_kiq(adev, pasid);
 
 	for (vmid = 0; vmid < 16; vmid++) {
@@ -1016,7 +927,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
 		uint64_t page_table_base)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-	uint64_t base = page_table_base | AMDGPU_PTE_VALID;
 
 	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
 		pr_err("trying to set page table base for wrong VMID %u\n",
@@ -1028,25 +938,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
 	 * now, all processes share the same address space size, like
	 * on GFX8 and older.
 	 */
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
-
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
-		lower_32_bits(adev->vm_manager.max_pfn - 1));
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
-		upper_32_bits(adev->vm_manager.max_pfn - 1));
-
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
-	WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
-
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
-
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
-		lower_32_bits(adev->vm_manager.max_pfn - 1));
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
-		upper_32_bits(adev->vm_manager.max_pfn - 1));
+	mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
 
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
+	gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 8816c697b205..ceadeeadfa56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -330,7 +330,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 	case CHIP_TOPAZ:
 		if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
 		    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
-		    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
+		    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) ||
+		    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
+		    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
 			info->is_kicker = true;
 			strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
 		} else
@@ -351,7 +353,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 		if (type == CGS_UCODE_ID_SMU) {
 			if (((adev->pdev->device == 0x67ef) &&
 			     ((adev->pdev->revision == 0xe0) ||
-			      (adev->pdev->revision == 0xe2) ||
 			      (adev->pdev->revision == 0xe5))) ||
 			    ((adev->pdev->device == 0x67ff) &&
 			     ((adev->pdev->revision == 0xcf) ||
@@ -359,8 +360,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 			      (adev->pdev->revision == 0xff)))) {
 				info->is_kicker = true;
 				strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
-			} else
+			} else if ((adev->pdev->device == 0x67ef) &&
+				   (adev->pdev->revision == 0xe2)) {
+				info->is_kicker = true;
+				strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+			} else {
 				strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+			}
 		} else if (type == CGS_UCODE_ID_SMU_SK) {
 			strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
 		}
@@ -378,14 +384,31 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 			      (adev->pdev->revision == 0xef))) {
 				info->is_kicker = true;
 				strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
-			} else
+			} else if ((adev->pdev->device == 0x67df) &&
+				   ((adev->pdev->revision == 0xe1) ||
+				    (adev->pdev->revision == 0xf7))) {
+				info->is_kicker = true;
+				strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+			} else {
 				strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+			}
 		} else if (type == CGS_UCODE_ID_SMU_SK) {
 			strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
 		}
 		break;
 	case CHIP_POLARIS12:
-		strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+		if (((adev->pdev->device == 0x6987) &&
+		     ((adev->pdev->revision == 0xc0) ||
+		      (adev->pdev->revision == 0xc3))) ||
+		    ((adev->pdev->device == 0x6981) &&
+		     ((adev->pdev->revision == 0x00) ||
+		      (adev->pdev->revision == 0x01) ||
+		      (adev->pdev->revision == 0x10)))) {
+			info->is_kicker = true;
+			strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+		} else {
+			strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+		}
 		break;
 	case CHIP_VEGAM:
 		strcpy(fw_name, "amdgpu/vegam_smc.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 35bc8fc3bc70..024dfbd87f11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1260,8 +1260,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	return 0;
 
 error_abort:
-	dma_fence_put(&job->base.s_fence->finished);
-	job->base.s_fence = NULL;
+	drm_sched_job_cleanup(&job->base);
 	amdgpu_mn_unlock(p->mn);
 
 error_unlock:
@@ -1285,7 +1284,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 	r = amdgpu_cs_parser_init(&parser, data);
 	if (r) {
-		DRM_ERROR("Failed to initialize parser !\n");
+		DRM_ERROR("Failed to initialize parser %d!\n", r);
 		goto out;
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
new file mode 100644
index 000000000000..0c590ddf250a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+
+ * * Author: Monk.liu@amd.com
+ */
+
+#include "amdgpu.h"
+
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
+{
+	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
+
+	addr -= AMDGPU_VA_RESERVED_SIZE;
+	addr = amdgpu_gmc_sign_extend(addr);
+
+	return addr;
+}
+
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+				u32 domain, uint32_t size)
+{
+	int r;
+	void *ptr;
+
+	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+				domain, bo,
+				NULL, &ptr);
+	if (!bo)
+		return -ENOMEM;
+
+	memset(ptr, 0, size);
+	return 0;
+}
+
+void amdgpu_free_static_csa(struct amdgpu_bo **bo)
+{
+	amdgpu_bo_free_kernel(bo, NULL, NULL);
+}
+
+/*
+ * amdgpu_map_static_csa should be called during amdgpu_vm_init
+ * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command
+ * submission of GFX should use this virtual address within META_DATA init
+ * package to support SRIOV gfx preemption.
+ */
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			  struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+			  uint64_t csa_addr, uint32_t size)
+{
+	struct ww_acquire_ctx ticket;
+	struct list_head list;
+	struct amdgpu_bo_list_entry pd;
+	struct ttm_validate_buffer csa_tv;
+	int r;
+
+	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&csa_tv.head);
+	csa_tv.bo = &bo->tbo;
+	csa_tv.shared = true;
+
+	list_add(&csa_tv.head, &list);
+	amdgpu_vm_get_pd_bo(vm, &list, &pd);
+
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+	if (r) {
+		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
+		return r;
+	}
+
+	*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
+	if (!*bo_va) {
+		ttm_eu_backoff_reservation(&ticket, &list);
+		DRM_ERROR("failed to create bo_va for static CSA\n");
+		return -ENOMEM;
+	}
+
+	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
+				size);
+	if (r) {
+		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
+		amdgpu_vm_bo_rmv(adev, *bo_va);
+		ttm_eu_backoff_reservation(&ticket, &list);
+		return r;
+	}
+
+	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
+			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+			     AMDGPU_PTE_EXECUTABLE);
+
+	if (r) {
+		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
+		amdgpu_vm_bo_rmv(adev, *bo_va);
+		ttm_eu_backoff_reservation(&ticket, &list);
+		return r;
+	}
+
+	ttm_eu_backoff_reservation(&ticket, &list);
+	return 0;
+}
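Note: as the comment on amdgpu_map_static_csa() above says, the mapping is created during amdgpu_vm_init(). A minimal sketch of the intended SR-IOV call sequence, pieced together from this file and the amdgpu_device.c hunk below (error handling elided; the vm pointer is assumed to come from the caller):

	struct amdgpu_bo_va *bo_va;
	int r;

	/* once per device, right after GMC hw init */
	r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
				       AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_CSA_SIZE);

	/* once per VM, mapping the BO at the reserved top-of-VA address */
	if (!r)
		r = amdgpu_map_static_csa(adev, vm, adev->virt.csa_obj, &bo_va,
					  amdgpu_csa_vaddr(adev), AMDGPU_CSA_SIZE);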
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
new file mode 100644
index 000000000000..524b4437a021
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Monk.liu@amd.com
+ */
+
+#ifndef AMDGPU_CSA_MANAGER_H
+#define AMDGPU_CSA_MANAGER_H
+
+#define AMDGPU_CSA_SIZE		(128 * 1024)
+
+uint32_t amdgpu_get_total_csa_size(struct amdgpu_device *adev);
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+				u32 domain, uint32_t size);
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			  struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+			  uint64_t csa_addr, uint32_t size);
+void amdgpu_free_static_csa(struct amdgpu_bo **bo);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 30bc345d6fdf..590588a82471 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1656,7 +1656,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 
 	/* right after GMC hw init, we create CSA */
 	if (amdgpu_sriov_vf(adev)) {
-		r = amdgpu_allocate_static_csa(adev);
+		r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
+						AMDGPU_GEM_DOMAIN_VRAM,
+						AMDGPU_CSA_SIZE);
 		if (r) {
 			DRM_ERROR("allocate CSA failed %d\n", r);
 			return r;
@@ -1681,7 +1683,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 	if (r)
 		return r;
 
-	amdgpu_xgmi_add_device(adev);
+	if (adev->gmc.xgmi.num_physical_nodes > 1)
+		amdgpu_xgmi_add_device(adev);
 	amdgpu_amdkfd_device_init(adev);
 
 	if (amdgpu_sriov_vf(adev))
@@ -1890,7 +1893,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
 			amdgpu_ucode_free_bo(adev);
			amdgpu_free_static_csa(&adev->virt.csa_obj);
 			amdgpu_device_wb_fini(adev);
 			amdgpu_device_vram_scratch_fini(adev);
 		}
@@ -3295,13 +3298,35 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
 		return false;
 	}
 
-	if (amdgpu_gpu_recovery == 0 || (amdgpu_gpu_recovery == -1 &&
-					 !amdgpu_sriov_vf(adev))) {
-		DRM_INFO("GPU recovery disabled.\n");
-		return false;
+	if (amdgpu_gpu_recovery == 0)
+		goto disabled;
+
+	if (amdgpu_sriov_vf(adev))
+		return true;
+
+	if (amdgpu_gpu_recovery == -1) {
+		switch (adev->asic_type) {
+		case CHIP_TOPAZ:
+		case CHIP_TONGA:
+		case CHIP_FIJI:
+		case CHIP_POLARIS10:
+		case CHIP_POLARIS11:
+		case CHIP_POLARIS12:
+		case CHIP_VEGAM:
+		case CHIP_VEGA20:
+		case CHIP_VEGA10:
+		case CHIP_VEGA12:
+			break;
+		default:
+			goto disabled;
+		}
 	}
 
 	return true;
+
+disabled:
+	DRM_INFO("GPU recovery disabled.\n");
+	return false;
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 943dbf3c5da1..8de55f7f1a3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -127,6 +127,9 @@ int amdgpu_compute_multipipe = -1;
 int amdgpu_gpu_recovery = -1; /* auto */
 int amdgpu_emu_mode = 0;
 uint amdgpu_smu_memory_pool_size = 0;
+/* FBC (bit 0) disabled by default*/
+uint amdgpu_dc_feature_mask = 0;
+
 struct amdgpu_mgpu_info mgpu_info = {
 	.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
 };
@@ -631,6 +634,14 @@ module_param(halt_if_hws_hang, int, 0644);
 MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
 #endif
 
+/**
+ * DOC: dcfeaturemask (uint)
+ * Override display features enabled. See enum DC_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * The default is the current set of stable display features.
+ */
+MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default))");
+module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
+
 static const struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_SI
 	{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
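Note: the new parameter is registered read-only at runtime (permissions 0444), so it takes effect at module load or via the kernel command line. Assuming bit 0 maps to FBC, as the comment in the first hunk states, re-enabling it would look like:

	amdgpu.dcfeaturemask=0x1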
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 5448cf27654e..ee47c11e92ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -398,9 +398,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 	ring->fence_drv.irq_type = irq_type;
 	ring->fence_drv.initialized = true;
 
-	dev_dbg(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
-		"cpu addr 0x%p\n", ring->idx,
-		ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
+	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr "
+		      "0x%016llx, cpu addr 0x%p\n", ring->name,
+		      ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 11fea28f8ad3..6d11e1721147 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -248,7 +248,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 	}
 	mb();
 	amdgpu_asic_flush_hdp(adev, NULL);
-	amdgpu_gmc_flush_gpu_tlb(adev, 0);
+	amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
 	return 0;
 }
 
@@ -259,6 +259,8 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @dma_addr: DMA addresses of pages
+ * @flags: page table entry flags
+ * @dst: CPU address of the gart table
 *
 * Map the dma_addresses into GART entries (all asics).
 * Returns 0 for success, -EINVAL for failure.
@@ -331,7 +333,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
 
 	mb();
 	amdgpu_asic_flush_hdp(adev, NULL);
-	amdgpu_gmc_flush_gpu_tlb(adev, 0);
+	amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 9ff62887e4e3..afa2e2877d87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -41,6 +41,7 @@ struct amdgpu_bo;
 
 struct amdgpu_gart {
 	struct amdgpu_bo		*bo;
+	/* CPU kmapped address of gart table */
 	void				*ptr;
 	unsigned			num_gpu_pages;
 	unsigned			num_cpu_pages;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 1a656b8657f7..6a70c0b7105f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -25,6 +25,7 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_gfx.h"
+#include "amdgpu_rlc.h"
 
 /* delay 0.1 second to enable gfx off feature */
 #define GFX_OFF_DELAY_ENABLE		msecs_to_jiffies(100)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index b61b5c11aead..f790e15bcd08 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -29,6 +29,7 @@
 */
 #include "clearstate_defs.h"
 #include "amdgpu_ring.h"
+#include "amdgpu_rlc.h"
 
 /* GFX current status */
 #define AMDGPU_GFX_NORMAL_MODE			0x00000000L
@@ -37,59 +38,6 @@
 #define AMDGPU_GFX_CG_DISABLED_MODE		0x00000004L
 #define AMDGPU_GFX_LBPW_DISABLED_MODE		0x00000008L
 
-
-struct amdgpu_rlc_funcs {
-	void (*enter_safe_mode)(struct amdgpu_device *adev);
-	void (*exit_safe_mode)(struct amdgpu_device *adev);
-};
-
-struct amdgpu_rlc {
-	/* for power gating */
-	struct amdgpu_bo *save_restore_obj;
-	uint64_t save_restore_gpu_addr;
-	volatile uint32_t *sr_ptr;
-	const u32 *reg_list;
-	u32 reg_list_size;
-	/* for clear state */
-	struct amdgpu_bo *clear_state_obj;
-	uint64_t clear_state_gpu_addr;
-	volatile uint32_t *cs_ptr;
-	const struct cs_section_def *cs_data;
-	u32 clear_state_size;
-	/* for cp tables */
-	struct amdgpu_bo *cp_table_obj;
-	uint64_t cp_table_gpu_addr;
-	volatile uint32_t *cp_table_ptr;
-	u32 cp_table_size;
-
-	/* safe mode for updating CG/PG state */
-	bool in_safe_mode;
-	const struct amdgpu_rlc_funcs *funcs;
-
-	/* for firmware data */
-	u32 save_and_restore_offset;
-	u32 clear_state_descriptor_offset;
-	u32 avail_scratch_ram_locations;
-	u32 reg_restore_list_size;
-	u32 reg_list_format_start;
-	u32 reg_list_format_separate_start;
-	u32 starting_offsets_start;
-	u32 reg_list_format_size_bytes;
-	u32 reg_list_size_bytes;
-	u32 reg_list_format_direct_reg_list_length;
-	u32 save_restore_list_cntl_size_bytes;
-	u32 save_restore_list_gpm_size_bytes;
-	u32 save_restore_list_srm_size_bytes;
-
-	u32 *register_list_format;
-	u32 *register_restore;
-	u8 *save_restore_list_cntl;
-	u8 *save_restore_list_gpm;
-	u8 *save_restore_list_srm;
-
-	bool is_rlc_v2_1;
-};
-
 #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
 
 struct amdgpu_mec {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 6fa7ef446e46..8c57924c075f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -64,7 +64,7 @@ struct amdgpu_vmhub {
 struct amdgpu_gmc_funcs {
 	/* flush the vm tlb via mmio */
 	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
-			      uint32_t vmid);
+			      uint32_t vmid, uint32_t flush_type);
 	/* flush the vm tlb via ring */
 	uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
 				       uint64_t pd_addr);
@@ -89,7 +89,7 @@ struct amdgpu_gmc_funcs {
 
 struct amdgpu_xgmi {
 	/* from psp */
-	u64 device_id;
+	u64 node_id;
 	u64 hive_id;
 	/* fixed per family */
 	u64 node_segment_size;
@@ -151,7 +151,7 @@ struct amdgpu_gmc {
 	struct amdgpu_xgmi xgmi;
 };
 
-#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
+#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, type) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (type))
 #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
 #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
 #define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
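Note: the TLB-flush hook gains a flush_type argument here; every caller converted in this series passes 0, which the gfx_v9 hunk above describes as the legacy invalidation mode. A minimal call through the updated macro:

	/* 0 = legacy flush type, matching the converted callers in this series */
	amdgpu_gmc_flush_gpu_tlb(adev, vmid, 0);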
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index b8963b725dfa..c48207b377bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -146,7 +146,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		fence_ctx = 0;
 	}
 
-	if (!ring->ready) {
+	if (!ring->sched.ready) {
 		dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
 		return -EINVAL;
 	}
@@ -221,8 +221,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		    !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
 			continue;
 
-		amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
-				    need_ctx_switch);
+		amdgpu_ring_emit_ib(ring, job, ib, need_ctx_switch);
 		need_ctx_switch = false;
 	}
 
@@ -347,19 +346,14 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 		tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
 	}
 
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+	for (i = 0; i < adev->num_rings; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
 		long tmo;
 
-		if (!ring || !ring->ready)
-			continue;
-
-		/* skip IB tests for KIQ in general for the below reasons:
-		 * 1. We never submit IBs to the KIQ
-		 * 2. KIQ doesn't use the EOP interrupts,
-		 *    we use some other CP interrupt.
+		/* KIQ rings don't have an IB test because we never submit IBs
+		 * to them and they have no interrupt support.
 		 */
-		if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+		if (!ring->sched.ready || !ring->funcs->test_ib)
 			continue;
 
 		/* MM engine need more time */
@@ -374,20 +368,23 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 			tmo = tmo_gfx;
 
 		r = amdgpu_ring_test_ib(ring, tmo);
-		if (r) {
-			ring->ready = false;
-
-			if (ring == &adev->gfx.gfx_ring[0]) {
-				/* oh, oh, that's really bad */
-				DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
-				adev->accel_working = false;
-				return r;
-
-			} else {
-				/* still not good, but we can live with it */
-				DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
-				ret = r;
-			}
+		if (!r) {
+			DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
+				      ring->name);
+			continue;
+		}
+
+		ring->sched.ready = false;
+		DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
+			      ring->name, r);
+
+		if (ring == &adev->gfx.gfx_ring[0]) {
+			/* oh, oh, that's really bad */
+			adev->accel_working = false;
+			return r;
+
+		} else {
+			ret = r;
 		}
 	}
 	return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 52c17f6219a7..6b6524f04ce0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | |||
@@ -94,23 +94,6 @@ static void amdgpu_hotplug_work_func(struct work_struct *work) | |||
94 | } | 94 | } |
95 | 95 | ||
96 | /** | 96 | /** |
97 | * amdgpu_irq_reset_work_func - execute GPU reset | ||
98 | * | ||
99 | * @work: work struct pointer | ||
100 | * | ||
101 | * Execute scheduled GPU reset (Cayman+). | ||
102 | * This function is called when the IRQ handler thinks we need a GPU reset. | ||
103 | */ | ||
104 | static void amdgpu_irq_reset_work_func(struct work_struct *work) | ||
105 | { | ||
106 | struct amdgpu_device *adev = container_of(work, struct amdgpu_device, | ||
107 | reset_work); | ||
108 | |||
109 | if (!amdgpu_sriov_vf(adev) && amdgpu_device_should_recover_gpu(adev)) | ||
110 | amdgpu_device_gpu_recover(adev, NULL); | ||
111 | } | ||
112 | |||
113 | /** | ||
114 | * amdgpu_irq_disable_all - disable *all* interrupts | 97 | * amdgpu_irq_disable_all - disable *all* interrupts |
115 | * | 98 | * |
116 | * @adev: amdgpu device pointer | 99 | * @adev: amdgpu device pointer |
@@ -262,15 +245,12 @@ int amdgpu_irq_init(struct amdgpu_device *adev) | |||
262 | amdgpu_hotplug_work_func); | 245 | amdgpu_hotplug_work_func); |
263 | } | 246 | } |
264 | 247 | ||
265 | INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func); | ||
266 | |||
267 | adev->irq.installed = true; | 248 | adev->irq.installed = true; |
268 | r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq); | 249 | r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq); |
269 | if (r) { | 250 | if (r) { |
270 | adev->irq.installed = false; | 251 | adev->irq.installed = false; |
271 | if (!amdgpu_device_has_dc_support(adev)) | 252 | if (!amdgpu_device_has_dc_support(adev)) |
272 | flush_work(&adev->hotplug_work); | 253 | flush_work(&adev->hotplug_work); |
273 | cancel_work_sync(&adev->reset_work); | ||
274 | return r; | 254 | return r; |
275 | } | 255 | } |
276 | adev->ddev->max_vblank_count = 0x00ffffff; | 256 | adev->ddev->max_vblank_count = 0x00ffffff; |
@@ -299,7 +279,6 @@ void amdgpu_irq_fini(struct amdgpu_device *adev) | |||
299 | pci_disable_msi(adev->pdev); | 279 | pci_disable_msi(adev->pdev); |
300 | if (!amdgpu_device_has_dc_support(adev)) | 280 | if (!amdgpu_device_has_dc_support(adev)) |
301 | flush_work(&adev->hotplug_work); | 281 | flush_work(&adev->hotplug_work); |
302 | cancel_work_sync(&adev->reset_work); | ||
303 | } | 282 | } |
304 | 283 | ||
305 | for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) { | 284 | for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 755f733bf0d9..e0af44fd6a0c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | |||
@@ -112,6 +112,8 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job) | |||
112 | struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); | 112 | struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); |
113 | struct amdgpu_job *job = to_amdgpu_job(s_job); | 113 | struct amdgpu_job *job = to_amdgpu_job(s_job); |
114 | 114 | ||
115 | drm_sched_job_cleanup(s_job); | ||
116 | |||
115 | amdgpu_ring_priority_put(ring, s_job->s_priority); | 117 | amdgpu_ring_priority_put(ring, s_job->s_priority); |
116 | dma_fence_put(job->fence); | 118 | dma_fence_put(job->fence); |
117 | amdgpu_sync_free(&job->sync); | 119 | amdgpu_sync_free(&job->sync); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index 57cfe78a262b..e1b46a6703de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | |||
@@ -33,6 +33,8 @@ | |||
33 | #define to_amdgpu_job(sched_job) \ | 33 | #define to_amdgpu_job(sched_job) \ |
34 | container_of((sched_job), struct amdgpu_job, base) | 34 | container_of((sched_job), struct amdgpu_job, base) |
35 | 35 | ||
36 | #define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0) | ||
37 | |||
36 | struct amdgpu_fence; | 38 | struct amdgpu_fence; |
37 | 39 | ||
38 | struct amdgpu_job { | 40 | struct amdgpu_job { |
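[Editor's note] AMDGPU_JOB_GET_VMID exists because rings are also fed by direct submissions (e.g. IB ring tests) that carry no job at all. A minimal illustration, not from the patch:

    struct amdgpu_job *job = NULL;                /* direct submission, no job */
    unsigned int vmid = AMDGPU_JOB_GET_VMID(job); /* safely yields 0 */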
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 81732a84c2ab..9b3164c0f861 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | |||
@@ -336,7 +336,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, | |||
336 | case AMDGPU_HW_IP_GFX: | 336 | case AMDGPU_HW_IP_GFX: |
337 | type = AMD_IP_BLOCK_TYPE_GFX; | 337 | type = AMD_IP_BLOCK_TYPE_GFX; |
338 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) | 338 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) |
339 | if (adev->gfx.gfx_ring[i].ready) | 339 | if (adev->gfx.gfx_ring[i].sched.ready) |
340 | ++num_rings; | 340 | ++num_rings; |
341 | ib_start_alignment = 32; | 341 | ib_start_alignment = 32; |
342 | ib_size_alignment = 32; | 342 | ib_size_alignment = 32; |
@@ -344,7 +344,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, | |||
344 | case AMDGPU_HW_IP_COMPUTE: | 344 | case AMDGPU_HW_IP_COMPUTE: |
345 | type = AMD_IP_BLOCK_TYPE_GFX; | 345 | type = AMD_IP_BLOCK_TYPE_GFX; |
346 | for (i = 0; i < adev->gfx.num_compute_rings; i++) | 346 | for (i = 0; i < adev->gfx.num_compute_rings; i++) |
347 | if (adev->gfx.compute_ring[i].ready) | 347 | if (adev->gfx.compute_ring[i].sched.ready) |
348 | ++num_rings; | 348 | ++num_rings; |
349 | ib_start_alignment = 32; | 349 | ib_start_alignment = 32; |
350 | ib_size_alignment = 32; | 350 | ib_size_alignment = 32; |
@@ -352,7 +352,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, | |||
352 | case AMDGPU_HW_IP_DMA: | 352 | case AMDGPU_HW_IP_DMA: |
353 | type = AMD_IP_BLOCK_TYPE_SDMA; | 353 | type = AMD_IP_BLOCK_TYPE_SDMA; |
354 | for (i = 0; i < adev->sdma.num_instances; i++) | 354 | for (i = 0; i < adev->sdma.num_instances; i++) |
355 | if (adev->sdma.instance[i].ring.ready) | 355 | if (adev->sdma.instance[i].ring.sched.ready) |
356 | ++num_rings; | 356 | ++num_rings; |
357 | ib_start_alignment = 256; | 357 | ib_start_alignment = 256; |
358 | ib_size_alignment = 4; | 358 | ib_size_alignment = 4; |
@@ -363,7 +363,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, | |||
363 | if (adev->uvd.harvest_config & (1 << i)) | 363 | if (adev->uvd.harvest_config & (1 << i)) |
364 | continue; | 364 | continue; |
365 | 365 | ||
366 | if (adev->uvd.inst[i].ring.ready) | 366 | if (adev->uvd.inst[i].ring.sched.ready) |
367 | ++num_rings; | 367 | ++num_rings; |
368 | } | 368 | } |
369 | ib_start_alignment = 64; | 369 | ib_start_alignment = 64; |
@@ -372,7 +372,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, | |||
372 | case AMDGPU_HW_IP_VCE: | 372 | case AMDGPU_HW_IP_VCE: |
373 | type = AMD_IP_BLOCK_TYPE_VCE; | 373 | type = AMD_IP_BLOCK_TYPE_VCE; |
374 | for (i = 0; i < adev->vce.num_rings; i++) | 374 | for (i = 0; i < adev->vce.num_rings; i++) |
375 | if (adev->vce.ring[i].ready) | 375 | if (adev->vce.ring[i].sched.ready) |
376 | ++num_rings; | 376 | ++num_rings; |
377 | ib_start_alignment = 4; | 377 | ib_start_alignment = 4; |
378 | ib_size_alignment = 1; | 378 | ib_size_alignment = 1; |
@@ -384,7 +384,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, | |||
384 | continue; | 384 | continue; |
385 | 385 | ||
386 | for (j = 0; j < adev->uvd.num_enc_rings; j++) | 386 | for (j = 0; j < adev->uvd.num_enc_rings; j++) |
387 | if (adev->uvd.inst[i].ring_enc[j].ready) | 387 | if (adev->uvd.inst[i].ring_enc[j].sched.ready) |
388 | ++num_rings; | 388 | ++num_rings; |
389 | } | 389 | } |
390 | ib_start_alignment = 64; | 390 | ib_start_alignment = 64; |
@@ -392,7 +392,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, | |||
392 | break; | 392 | break; |
393 | case AMDGPU_HW_IP_VCN_DEC: | 393 | case AMDGPU_HW_IP_VCN_DEC: |
394 | type = AMD_IP_BLOCK_TYPE_VCN; | 394 | type = AMD_IP_BLOCK_TYPE_VCN; |
395 | if (adev->vcn.ring_dec.ready) | 395 | if (adev->vcn.ring_dec.sched.ready) |
396 | ++num_rings; | 396 | ++num_rings; |
397 | ib_start_alignment = 16; | 397 | ib_start_alignment = 16; |
398 | ib_size_alignment = 16; | 398 | ib_size_alignment = 16; |
@@ -400,14 +400,14 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, | |||
400 | case AMDGPU_HW_IP_VCN_ENC: | 400 | case AMDGPU_HW_IP_VCN_ENC: |
401 | type = AMD_IP_BLOCK_TYPE_VCN; | 401 | type = AMD_IP_BLOCK_TYPE_VCN; |
402 | for (i = 0; i < adev->vcn.num_enc_rings; i++) | 402 | for (i = 0; i < adev->vcn.num_enc_rings; i++) |
403 | if (adev->vcn.ring_enc[i].ready) | 403 | if (adev->vcn.ring_enc[i].sched.ready) |
404 | ++num_rings; | 404 | ++num_rings; |
405 | ib_start_alignment = 64; | 405 | ib_start_alignment = 64; |
406 | ib_size_alignment = 1; | 406 | ib_size_alignment = 1; |
407 | break; | 407 | break; |
408 | case AMDGPU_HW_IP_VCN_JPEG: | 408 | case AMDGPU_HW_IP_VCN_JPEG: |
409 | type = AMD_IP_BLOCK_TYPE_VCN; | 409 | type = AMD_IP_BLOCK_TYPE_VCN; |
410 | if (adev->vcn.ring_jpeg.ready) | 410 | if (adev->vcn.ring_jpeg.sched.ready) |
411 | ++num_rings; | 411 | ++num_rings; |
412 | ib_start_alignment = 16; | 412 | ib_start_alignment = 16; |
413 | ib_size_alignment = 16; | 413 | ib_size_alignment = 16; |
@@ -978,7 +978,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) | |||
978 | } | 978 | } |
979 | 979 | ||
980 | if (amdgpu_sriov_vf(adev)) { | 980 | if (amdgpu_sriov_vf(adev)) { |
981 | r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va); | 981 | uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK; |
982 | |||
983 | r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj, | ||
984 | &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE); | ||
982 | if (r) | 985 | if (r) |
983 | goto error_vm; | 986 | goto error_vm; |
984 | } | 987 | } |
@@ -1048,8 +1051,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, | |||
1048 | pasid = fpriv->vm.pasid; | 1051 | pasid = fpriv->vm.pasid; |
1049 | pd = amdgpu_bo_ref(fpriv->vm.root.base.bo); | 1052 | pd = amdgpu_bo_ref(fpriv->vm.root.base.bo); |
1050 | 1053 | ||
1051 | amdgpu_vm_fini(adev, &fpriv->vm); | ||
1052 | amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr); | 1054 | amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr); |
1055 | amdgpu_vm_fini(adev, &fpriv->vm); | ||
1053 | 1056 | ||
1054 | if (pasid) | 1057 | if (pasid) |
1055 | amdgpu_pasid_free_delayed(pd->tbo.resv, pasid); | 1058 | amdgpu_pasid_free_delayed(pd->tbo.resv, pasid); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index b9e9e8b02fb7..11723d8fffbd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | |||
@@ -57,7 +57,6 @@ struct amdgpu_hpd; | |||
57 | #define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base) | 57 | #define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base) |
58 | #define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base) | 58 | #define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base) |
59 | #define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base) | 59 | #define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base) |
60 | #define to_amdgpu_plane(x) container_of(x, struct amdgpu_plane, base) | ||
61 | 60 | ||
62 | #define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base); | 61 | #define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base); |
63 | 62 | ||
@@ -325,7 +324,7 @@ struct amdgpu_mode_info { | |||
325 | struct card_info *atom_card_info; | 324 | struct card_info *atom_card_info; |
326 | bool mode_config_initialized; | 325 | bool mode_config_initialized; |
327 | struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS]; | 326 | struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS]; |
328 | struct amdgpu_plane *planes[AMDGPU_MAX_PLANES]; | 327 | struct drm_plane *planes[AMDGPU_MAX_PLANES]; |
329 | struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS]; | 328 | struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS]; |
330 | /* DVI-I properties */ | 329 | /* DVI-I properties */ |
331 | struct drm_property *coherent_mode_property; | 330 | struct drm_property *coherent_mode_property; |
@@ -434,11 +433,6 @@ struct amdgpu_crtc { | |||
434 | struct drm_pending_vblank_event *event; | 433 | struct drm_pending_vblank_event *event; |
435 | }; | 434 | }; |
436 | 435 | ||
437 | struct amdgpu_plane { | ||
438 | struct drm_plane base; | ||
439 | enum drm_plane_type plane_type; | ||
440 | }; | ||
441 | |||
442 | struct amdgpu_encoder_atom_dig { | 436 | struct amdgpu_encoder_atom_dig { |
443 | bool linkb; | 437 | bool linkb; |
444 | /* atom dig */ | 438 | /* atom dig */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 59cc678de8c1..7235cd0b0fa9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | |||
@@ -2129,7 +2129,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) | |||
2129 | 2129 | ||
2130 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { | 2130 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { |
2131 | struct amdgpu_ring *ring = adev->rings[i]; | 2131 | struct amdgpu_ring *ring = adev->rings[i]; |
2132 | if (ring && ring->ready) | 2132 | if (ring && ring->sched.ready) |
2133 | amdgpu_fence_wait_empty(ring); | 2133 | amdgpu_fence_wait_empty(ring); |
2134 | } | 2134 | } |
2135 | 2135 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 25d2f3e757f1..e05dc66b1090 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | |||
@@ -90,6 +90,8 @@ static int psp_sw_fini(void *handle) | |||
90 | adev->psp.sos_fw = NULL; | 90 | adev->psp.sos_fw = NULL; |
91 | release_firmware(adev->psp.asd_fw); | 91 | release_firmware(adev->psp.asd_fw); |
92 | adev->psp.asd_fw = NULL; | 92 | adev->psp.asd_fw = NULL; |
93 | release_firmware(adev->psp.ta_fw); | ||
94 | adev->psp.ta_fw = NULL; | ||
93 | return 0; | 95 | return 0; |
94 | } | 96 | } |
95 | 97 | ||
@@ -118,21 +120,25 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index, | |||
118 | static int | 120 | static int |
119 | psp_cmd_submit_buf(struct psp_context *psp, | 121 | psp_cmd_submit_buf(struct psp_context *psp, |
120 | struct amdgpu_firmware_info *ucode, | 122 | struct amdgpu_firmware_info *ucode, |
121 | struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr, | 123 | struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr) |
122 | int index) | ||
123 | { | 124 | { |
124 | int ret; | 125 | int ret; |
126 | int index; | ||
125 | 127 | ||
126 | memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); | 128 | memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE); |
127 | 129 | ||
128 | memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); | 130 | memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp)); |
129 | 131 | ||
132 | index = atomic_inc_return(&psp->fence_value); | ||
130 | ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr, | 133 | ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr, |
131 | fence_mc_addr, index); | 134 | fence_mc_addr, index); |
135 | if (ret) { | ||
136 | atomic_dec(&psp->fence_value); | ||
137 | return ret; | ||
138 | } | ||
132 | 139 | ||
133 | while (*((unsigned int *)psp->fence_buf) != index) { | 140 | while (*((unsigned int *)psp->fence_buf) != index) |
134 | msleep(1); | 141 | msleep(1); |
135 | } | ||
136 | 142 | ||
137 | /* the status field must be 0 after FW is loaded */ | 143 | /* the status field must be 0 after FW is loaded */ |
138 | if (ucode && psp->cmd_buf_mem->resp.status) { | 144 | if (ucode && psp->cmd_buf_mem->resp.status) { |
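[Editor's note] With the index now drawn from psp->fence_value, callers no longer hand-number their submissions (the literal 1, 2 and i + 3 disappear below). The pattern in isolation, as a hedged sketch assuming a kernel context with <linux/atomic.h> and <linux/delay.h> available:

    static void example_submit_and_wait(atomic_t *fence_value,
                                        volatile unsigned int *fence_buf)
    {
            int index = atomic_inc_return(fence_value);

            /* ... submit the command with this index to the PSP ... */

            /* the PSP writes the index back to the fence buffer when done */
            while (*fence_buf != index)
                    msleep(1);
    }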
@@ -191,7 +197,7 @@ static int psp_tmr_load(struct psp_context *psp) | |||
191 | PSP_TMR_SIZE, psp->tmr_mc_addr); | 197 | PSP_TMR_SIZE, psp->tmr_mc_addr); |
192 | 198 | ||
193 | ret = psp_cmd_submit_buf(psp, NULL, cmd, | 199 | ret = psp_cmd_submit_buf(psp, NULL, cmd, |
194 | psp->fence_buf_mc_addr, 1); | 200 | psp->fence_buf_mc_addr); |
195 | if (ret) | 201 | if (ret) |
196 | goto failed; | 202 | goto failed; |
197 | 203 | ||
@@ -258,13 +264,194 @@ static int psp_asd_load(struct psp_context *psp) | |||
258 | psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE); | 264 | psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE); |
259 | 265 | ||
260 | ret = psp_cmd_submit_buf(psp, NULL, cmd, | 266 | ret = psp_cmd_submit_buf(psp, NULL, cmd, |
261 | psp->fence_buf_mc_addr, 2); | 267 | psp->fence_buf_mc_addr); |
268 | |||
269 | kfree(cmd); | ||
270 | |||
271 | return ret; | ||
272 | } | ||
273 | |||
274 | static void psp_prep_xgmi_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, | ||
275 | uint64_t xgmi_ta_mc, uint64_t xgmi_mc_shared, | ||
276 | uint32_t xgmi_ta_size, uint32_t shared_size) | ||
277 | { | ||
278 | cmd->cmd_id = GFX_CMD_ID_LOAD_TA; | ||
279 | cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(xgmi_ta_mc); | ||
280 | cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(xgmi_ta_mc); | ||
281 | cmd->cmd.cmd_load_ta.app_len = xgmi_ta_size; | ||
282 | |||
283 | cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(xgmi_mc_shared); | ||
284 | cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(xgmi_mc_shared); | ||
285 | cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; | ||
286 | } | ||
287 | |||
288 | static int psp_xgmi_init_shared_buf(struct psp_context *psp) | ||
289 | { | ||
290 | int ret; | ||
291 | |||
292 | /* | ||
293 | * Allocate 16 KB of 4 KB-aligned memory from the frame buffer (local | ||
294 | * physical memory) for XGMI TA <-> driver communication. | ||
295 | */ | ||
296 | ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE, | ||
297 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, | ||
298 | &psp->xgmi_context.xgmi_shared_bo, | ||
299 | &psp->xgmi_context.xgmi_shared_mc_addr, | ||
300 | &psp->xgmi_context.xgmi_shared_buf); | ||
301 | |||
302 | return ret; | ||
303 | } | ||
304 | |||
305 | static int psp_xgmi_load(struct psp_context *psp) | ||
306 | { | ||
307 | int ret; | ||
308 | struct psp_gfx_cmd_resp *cmd; | ||
309 | |||
310 | /* | ||
311 | * TODO: bypass the loading in sriov for now | ||
312 | */ | ||
313 | if (amdgpu_sriov_vf(psp->adev)) | ||
314 | return 0; | ||
315 | |||
316 | cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); | ||
317 | if (!cmd) | ||
318 | return -ENOMEM; | ||
319 | |||
320 | memset(psp->fw_pri_buf, 0, PSP_1_MEG); | ||
321 | memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size); | ||
322 | |||
323 | psp_prep_xgmi_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, | ||
324 | psp->xgmi_context.xgmi_shared_mc_addr, | ||
325 | psp->ta_xgmi_ucode_size, PSP_XGMI_SHARED_MEM_SIZE); | ||
326 | |||
327 | ret = psp_cmd_submit_buf(psp, NULL, cmd, | ||
328 | psp->fence_buf_mc_addr); | ||
329 | |||
330 | if (!ret) { | ||
331 | psp->xgmi_context.initialized = 1; | ||
332 | psp->xgmi_context.session_id = cmd->resp.session_id; | ||
333 | } | ||
334 | |||
335 | kfree(cmd); | ||
336 | |||
337 | return ret; | ||
338 | } | ||
339 | |||
340 | static void psp_prep_xgmi_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, | ||
341 | uint32_t xgmi_session_id) | ||
342 | { | ||
343 | cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; | ||
344 | cmd->cmd.cmd_unload_ta.session_id = xgmi_session_id; | ||
345 | } | ||
346 | |||
347 | static int psp_xgmi_unload(struct psp_context *psp) | ||
348 | { | ||
349 | int ret; | ||
350 | struct psp_gfx_cmd_resp *cmd; | ||
351 | |||
352 | /* | ||
353 | * TODO: bypass the unloading in sriov for now | ||
354 | */ | ||
355 | if (amdgpu_sriov_vf(psp->adev)) | ||
356 | return 0; | ||
357 | |||
358 | cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); | ||
359 | if (!cmd) | ||
360 | return -ENOMEM; | ||
361 | |||
362 | psp_prep_xgmi_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id); | ||
363 | |||
364 | ret = psp_cmd_submit_buf(psp, NULL, cmd, | ||
365 | psp->fence_buf_mc_addr); | ||
262 | 366 | ||
263 | kfree(cmd); | 367 | kfree(cmd); |
264 | 368 | ||
265 | return ret; | 369 | return ret; |
266 | } | 370 | } |
267 | 371 | ||
372 | static void psp_prep_xgmi_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, | ||
373 | uint32_t ta_cmd_id, | ||
374 | uint32_t xgmi_session_id) | ||
375 | { | ||
376 | cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; | ||
377 | cmd->cmd.cmd_invoke_cmd.session_id = xgmi_session_id; | ||
378 | cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; | ||
379 | /* Note: cmd_invoke_cmd.buf is not used for now */ | ||
380 | } | ||
381 | |||
382 | int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) | ||
383 | { | ||
384 | int ret; | ||
385 | struct psp_gfx_cmd_resp *cmd; | ||
386 | |||
387 | /* | ||
388 | * TODO: bypass the loading in sriov for now | ||
389 | */ | ||
390 | if (amdgpu_sriov_vf(psp->adev)) | ||
391 | return 0; | ||
392 | |||
393 | cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); | ||
394 | if (!cmd) | ||
395 | return -ENOMEM; | ||
396 | |||
397 | psp_prep_xgmi_ta_invoke_cmd_buf(cmd, ta_cmd_id, | ||
398 | psp->xgmi_context.session_id); | ||
399 | |||
400 | ret = psp_cmd_submit_buf(psp, NULL, cmd, | ||
401 | psp->fence_buf_mc_addr); | ||
402 | |||
403 | kfree(cmd); | ||
404 | |||
405 | return ret; | ||
406 | } | ||
407 | |||
408 | static int psp_xgmi_terminate(struct psp_context *psp) | ||
409 | { | ||
410 | int ret; | ||
411 | |||
412 | if (!psp->xgmi_context.initialized) | ||
413 | return 0; | ||
414 | |||
415 | ret = psp_xgmi_unload(psp); | ||
416 | if (ret) | ||
417 | return ret; | ||
418 | |||
419 | psp->xgmi_context.initialized = 0; | ||
420 | |||
421 | /* free xgmi shared memory */ | ||
422 | amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo, | ||
423 | &psp->xgmi_context.xgmi_shared_mc_addr, | ||
424 | &psp->xgmi_context.xgmi_shared_buf); | ||
425 | |||
426 | return 0; | ||
427 | } | ||
428 | |||
429 | static int psp_xgmi_initialize(struct psp_context *psp) | ||
430 | { | ||
431 | struct ta_xgmi_shared_memory *xgmi_cmd; | ||
432 | int ret; | ||
433 | |||
434 | if (!psp->xgmi_context.initialized) { | ||
435 | ret = psp_xgmi_init_shared_buf(psp); | ||
436 | if (ret) | ||
437 | return ret; | ||
438 | } | ||
439 | |||
440 | /* Load XGMI TA */ | ||
441 | ret = psp_xgmi_load(psp); | ||
442 | if (ret) | ||
443 | return ret; | ||
444 | |||
445 | /* Initialize XGMI session */ | ||
446 | xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf); | ||
447 | memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); | ||
448 | xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE; | ||
449 | |||
450 | ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); | ||
451 | |||
452 | return ret; | ||
453 | } | ||
454 | |||
268 | static int psp_hw_start(struct psp_context *psp) | 455 | static int psp_hw_start(struct psp_context *psp) |
269 | { | 456 | { |
270 | struct amdgpu_device *adev = psp->adev; | 457 | struct amdgpu_device *adev = psp->adev; |
@@ -292,6 +479,15 @@ static int psp_hw_start(struct psp_context *psp) | |||
292 | if (ret) | 479 | if (ret) |
293 | return ret; | 480 | return ret; |
294 | 481 | ||
482 | if (adev->gmc.xgmi.num_physical_nodes > 1) { | ||
483 | ret = psp_xgmi_initialize(psp); | ||
484 | /* Warn on an XGMI session initialization failure | ||
485 | * instead of stopping driver initialization. | ||
486 | */ | ||
487 | if (ret) | ||
488 | dev_err(psp->adev->dev, | ||
489 | "XGMI: Failed to initialize XGMI session\n"); | ||
490 | } | ||
295 | return 0; | 491 | return 0; |
296 | } | 492 | } |
297 | 493 | ||
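[Editor's note] With the session established by psp_xgmi_initialize(), later TA requests reuse psp_xgmi_invoke() with a command id placed in the shared buffer. A hedged sketch of a follow-up query; TA_COMMAND_XGMI__GET_HIVE_ID and the xgmi_out_message layout are assumed from ta_xgmi_if.h, which this diff does not show:

    static int example_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
    {
            struct ta_xgmi_shared_memory *xgmi_cmd =
                    (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
            int ret;

            memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
            xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;

            ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
            if (ret)
                    return ret;

            *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
            return 0;
    }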
@@ -321,7 +517,7 @@ static int psp_np_fw_load(struct psp_context *psp) | |||
321 | return ret; | 517 | return ret; |
322 | 518 | ||
323 | ret = psp_cmd_submit_buf(psp, ucode, psp->cmd, | 519 | ret = psp_cmd_submit_buf(psp, ucode, psp->cmd, |
324 | psp->fence_buf_mc_addr, i + 3); | 520 | psp->fence_buf_mc_addr); |
325 | if (ret) | 521 | if (ret) |
326 | return ret; | 522 | return ret; |
327 | 523 | ||
@@ -452,6 +648,10 @@ static int psp_hw_fini(void *handle) | |||
452 | if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) | 648 | if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) |
453 | return 0; | 649 | return 0; |
454 | 650 | ||
651 | if (adev->gmc.xgmi.num_physical_nodes > 1 && | ||
652 | psp->xgmi_context.initialized == 1) | ||
653 | psp_xgmi_terminate(psp); | ||
654 | |||
455 | psp_ring_destroy(psp, PSP_RING_TYPE__KM); | 655 | psp_ring_destroy(psp, PSP_RING_TYPE__KM); |
456 | 656 | ||
457 | amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf); | 657 | amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf); |
@@ -479,6 +679,15 @@ static int psp_suspend(void *handle) | |||
479 | if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) | 679 | if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) |
480 | return 0; | 680 | return 0; |
481 | 681 | ||
682 | if (adev->gmc.xgmi.num_physical_nodes > 1 && | ||
683 | psp->xgmi_context.initialized == 1) { | ||
684 | ret = psp_xgmi_terminate(psp); | ||
685 | if (ret) { | ||
686 | DRM_ERROR("Failed to terminate xgmi ta\n"); | ||
687 | return ret; | ||
688 | } | ||
689 | } | ||
690 | |||
482 | ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); | 691 | ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); |
483 | if (ret) { | 692 | if (ret) { |
484 | DRM_ERROR("PSP ring stop failed\n"); | 693 | DRM_ERROR("PSP ring stop failed\n"); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 8b8720e9c3f0..9ec5d1a666a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | |||
@@ -27,14 +27,17 @@ | |||
27 | 27 | ||
28 | #include "amdgpu.h" | 28 | #include "amdgpu.h" |
29 | #include "psp_gfx_if.h" | 29 | #include "psp_gfx_if.h" |
30 | #include "ta_xgmi_if.h" | ||
30 | 31 | ||
31 | #define PSP_FENCE_BUFFER_SIZE 0x1000 | 32 | #define PSP_FENCE_BUFFER_SIZE 0x1000 |
32 | #define PSP_CMD_BUFFER_SIZE 0x1000 | 33 | #define PSP_CMD_BUFFER_SIZE 0x1000 |
33 | #define PSP_ASD_SHARED_MEM_SIZE 0x4000 | 34 | #define PSP_ASD_SHARED_MEM_SIZE 0x4000 |
35 | #define PSP_XGMI_SHARED_MEM_SIZE 0x4000 | ||
34 | #define PSP_1_MEG 0x100000 | 36 | #define PSP_1_MEG 0x100000 |
35 | #define PSP_TMR_SIZE 0x400000 | 37 | #define PSP_TMR_SIZE 0x400000 |
36 | 38 | ||
37 | struct psp_context; | 39 | struct psp_context; |
40 | struct psp_xgmi_node_info; | ||
38 | struct psp_xgmi_topology_info; | 41 | struct psp_xgmi_topology_info; |
39 | 42 | ||
40 | enum psp_ring_type | 43 | enum psp_ring_type |
@@ -80,12 +83,20 @@ struct psp_funcs | |||
80 | enum AMDGPU_UCODE_ID ucode_type); | 83 | enum AMDGPU_UCODE_ID ucode_type); |
81 | bool (*smu_reload_quirk)(struct psp_context *psp); | 84 | bool (*smu_reload_quirk)(struct psp_context *psp); |
82 | int (*mode1_reset)(struct psp_context *psp); | 85 | int (*mode1_reset)(struct psp_context *psp); |
83 | uint64_t (*xgmi_get_device_id)(struct psp_context *psp); | 86 | uint64_t (*xgmi_get_node_id)(struct psp_context *psp); |
84 | uint64_t (*xgmi_get_hive_id)(struct psp_context *psp); | 87 | uint64_t (*xgmi_get_hive_id)(struct psp_context *psp); |
85 | int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices, | 88 | int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices, |
86 | struct psp_xgmi_topology_info *topology); | 89 | struct psp_xgmi_topology_info *topology); |
87 | int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices, | 90 | int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices, |
88 | struct psp_xgmi_topology_info *topology); | 91 | struct psp_xgmi_topology_info *topology); |
92 | }; | ||
93 | |||
94 | struct psp_xgmi_context { | ||
95 | uint8_t initialized; | ||
96 | uint32_t session_id; | ||
97 | struct amdgpu_bo *xgmi_shared_bo; | ||
98 | uint64_t xgmi_shared_mc_addr; | ||
99 | void *xgmi_shared_buf; | ||
89 | }; | 100 | }; |
90 | 101 | ||
91 | struct psp_context | 102 | struct psp_context |
@@ -96,7 +107,7 @@ struct psp_context | |||
96 | 107 | ||
97 | const struct psp_funcs *funcs; | 108 | const struct psp_funcs *funcs; |
98 | 109 | ||
99 | /* fence buffer */ | 110 | /* firmware buffer */ |
100 | struct amdgpu_bo *fw_pri_bo; | 111 | struct amdgpu_bo *fw_pri_bo; |
101 | uint64_t fw_pri_mc_addr; | 112 | uint64_t fw_pri_mc_addr; |
102 | void *fw_pri_buf; | 113 | void *fw_pri_buf; |
@@ -134,6 +145,16 @@ struct psp_context | |||
134 | struct amdgpu_bo *cmd_buf_bo; | 145 | struct amdgpu_bo *cmd_buf_bo; |
135 | uint64_t cmd_buf_mc_addr; | 146 | uint64_t cmd_buf_mc_addr; |
136 | struct psp_gfx_cmd_resp *cmd_buf_mem; | 147 | struct psp_gfx_cmd_resp *cmd_buf_mem; |
148 | |||
149 | /* fence value associated with cmd buffer */ | ||
150 | atomic_t fence_value; | ||
151 | |||
152 | /* xgmi ta firmware and buffer */ | ||
153 | const struct firmware *ta_fw; | ||
154 | uint32_t ta_xgmi_ucode_version; | ||
155 | uint32_t ta_xgmi_ucode_size; | ||
156 | uint8_t *ta_xgmi_start_addr; | ||
157 | struct psp_xgmi_context xgmi_context; | ||
137 | }; | 158 | }; |
138 | 159 | ||
139 | struct amdgpu_psp_funcs { | 160 | struct amdgpu_psp_funcs { |
@@ -141,21 +162,17 @@ struct amdgpu_psp_funcs { | |||
141 | enum AMDGPU_UCODE_ID); | 162 | enum AMDGPU_UCODE_ID); |
142 | }; | 163 | }; |
143 | 164 | ||
165 | #define AMDGPU_XGMI_MAX_CONNECTED_NODES 64 | ||
166 | struct psp_xgmi_node_info { | ||
167 | uint64_t node_id; | ||
168 | uint8_t num_hops; | ||
169 | uint8_t is_sharing_enabled; | ||
170 | enum ta_xgmi_assigned_sdma_engine sdma_engine; | ||
171 | }; | ||
172 | |||
144 | struct psp_xgmi_topology_info { | 173 | struct psp_xgmi_topology_info { |
145 | /* Generated by PSP to identify the GPU instance within xgmi connection */ | 174 | uint32_t num_nodes; |
146 | uint64_t device_id; | 175 | struct psp_xgmi_node_info nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES]; |
147 | /* | ||
148 | * If all bits set to 0 , driver indicates it wants to retrieve the xgmi | ||
149 | * connection vector topology, but not access enable the connections | ||
150 | * if some or all bits are set to 1, driver indicates it want to retrieve the | ||
151 | * current xgmi topology and access enable the link to GPU[i] associated | ||
152 | * with the bit position in the vector. | ||
153 | * On return,: bits indicated which xgmi links are present/active depending | ||
154 | * on the value passed in. The relative bit offset for the relative GPU index | ||
155 | * within the hive is always marked active. | ||
156 | */ | ||
157 | uint32_t connection_mask; | ||
158 | uint32_t reserved; /* must be 0 */ | ||
159 | }; | 176 | }; |
160 | 177 | ||
161 | #define psp_prep_cmd_buf(ucode, type) (psp)->funcs->prep_cmd_buf((ucode), (type)) | 178 | #define psp_prep_cmd_buf(ucode, type) (psp)->funcs->prep_cmd_buf((ucode), (type)) |
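[Editor's note] psp_xgmi_topology_info has turned from an opaque connection mask into an explicit node array, so callers can walk per-link attributes directly. A minimal sketch using only the fields defined above:

    static void example_dump_topology(const struct psp_xgmi_topology_info *top)
    {
            uint32_t i;

            for (i = 0; i < top->num_nodes; i++)
                    pr_info("node 0x%llx: %u hop(s), sharing %s\n",
                            top->nodes[i].node_id,
                            top->nodes[i].num_hops,
                            top->nodes[i].is_sharing_enabled ? "on" : "off");
    }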
@@ -177,8 +194,8 @@ struct psp_xgmi_topology_info { | |||
177 | ((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false) | 194 | ((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false) |
178 | #define psp_mode1_reset(psp) \ | 195 | #define psp_mode1_reset(psp) \ |
179 | ((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false) | 196 | ((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false) |
180 | #define psp_xgmi_get_device_id(psp) \ | 197 | #define psp_xgmi_get_node_id(psp) \ |
181 | ((psp)->funcs->xgmi_get_device_id ? (psp)->funcs->xgmi_get_device_id((psp)) : 0) | 198 | ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp)) : 0) |
182 | #define psp_xgmi_get_hive_id(psp) \ | 199 | #define psp_xgmi_get_hive_id(psp) \ |
183 | ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp)) : 0) | 200 | ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp)) : 0) |
184 | #define psp_xgmi_get_topology_info(psp, num_device, topology) \ | 201 | #define psp_xgmi_get_topology_info(psp, num_device, topology) \ |
@@ -199,6 +216,8 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index, | |||
199 | extern const struct amdgpu_ip_block_version psp_v10_0_ip_block; | 216 | extern const struct amdgpu_ip_block_version psp_v10_0_ip_block; |
200 | 217 | ||
201 | int psp_gpu_reset(struct amdgpu_device *adev); | 218 | int psp_gpu_reset(struct amdgpu_device *adev); |
219 | int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id); | ||
220 | |||
202 | extern const struct amdgpu_ip_block_version psp_v11_0_ip_block; | 221 | extern const struct amdgpu_ip_block_version psp_v11_0_ip_block; |
203 | 222 | ||
204 | #endif | 223 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index b70e85ec147d..5b75bdc8dc28 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | |||
@@ -338,7 +338,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, | |||
338 | */ | 338 | */ |
339 | void amdgpu_ring_fini(struct amdgpu_ring *ring) | 339 | void amdgpu_ring_fini(struct amdgpu_ring *ring) |
340 | { | 340 | { |
341 | ring->ready = false; | 341 | ring->sched.ready = false; |
342 | 342 | ||
343 | /* Not to finish a ring which is not initialized */ | 343 | /* Not to finish a ring which is not initialized */ |
344 | if (!(ring->adev) || !(ring->adev->rings[ring->idx])) | 344 | if (!(ring->adev) || !(ring->adev->rings[ring->idx])) |
@@ -500,3 +500,29 @@ static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring) | |||
500 | debugfs_remove(ring->ent); | 500 | debugfs_remove(ring->ent); |
501 | #endif | 501 | #endif |
502 | } | 502 | } |
503 | |||
504 | /** | ||
505 | * amdgpu_ring_test_helper - test a ring and set its scheduler readiness status | ||
506 | * | ||
507 | * @ring: ring to test | ||
508 | * | ||
509 | * Tests the ring and sets the scheduler readiness status accordingly. | ||
510 | * | ||
511 | * Returns 0 on success, error on failure. | ||
512 | */ | ||
513 | int amdgpu_ring_test_helper(struct amdgpu_ring *ring) | ||
514 | { | ||
515 | struct amdgpu_device *adev = ring->adev; | ||
516 | int r; | ||
517 | |||
518 | r = amdgpu_ring_test_ring(ring); | ||
519 | if (r) | ||
520 | DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n", | ||
521 | ring->name, r); | ||
522 | else | ||
523 | DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n", | ||
524 | ring->name); | ||
525 | |||
526 | ring->sched.ready = !r; | ||
527 | return r; | ||
528 | } | ||
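[Editor's note] amdgpu_ring_test_helper() folds the test, the logging and the sched.ready update into one call, so IP blocks can drop their open-coded variants. A hedged sketch of a typical hw_init-time use:

    static int example_start_ring(struct amdgpu_ring *ring)
    {
            /* logs success/failure and sets ring->sched.ready = !r */
            return amdgpu_ring_test_helper(ring);
    }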
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 4caa301ce454..0beb01fef83f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | |||
@@ -129,8 +129,9 @@ struct amdgpu_ring_funcs { | |||
129 | unsigned emit_ib_size; | 129 | unsigned emit_ib_size; |
130 | /* command emit functions */ | 130 | /* command emit functions */ |
131 | void (*emit_ib)(struct amdgpu_ring *ring, | 131 | void (*emit_ib)(struct amdgpu_ring *ring, |
132 | struct amdgpu_job *job, | ||
132 | struct amdgpu_ib *ib, | 133 | struct amdgpu_ib *ib, |
133 | unsigned vmid, bool ctx_switch); | 134 | bool ctx_switch); |
134 | void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, | 135 | void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, |
135 | uint64_t seq, unsigned flags); | 136 | uint64_t seq, unsigned flags); |
136 | void (*emit_pipeline_sync)(struct amdgpu_ring *ring); | 137 | void (*emit_pipeline_sync)(struct amdgpu_ring *ring); |
@@ -189,7 +190,6 @@ struct amdgpu_ring { | |||
189 | uint64_t gpu_addr; | 190 | uint64_t gpu_addr; |
190 | uint64_t ptr_mask; | 191 | uint64_t ptr_mask; |
191 | uint32_t buf_mask; | 192 | uint32_t buf_mask; |
192 | bool ready; | ||
193 | u32 idx; | 193 | u32 idx; |
194 | u32 me; | 194 | u32 me; |
195 | u32 pipe; | 195 | u32 pipe; |
@@ -229,7 +229,7 @@ struct amdgpu_ring { | |||
229 | #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) | 229 | #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) |
230 | #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) | 230 | #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) |
231 | #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) | 231 | #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) |
232 | #define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c)) | 232 | #define amdgpu_ring_emit_ib(r, job, ib, c) ((r)->funcs->emit_ib((r), (job), (ib), (c))) |
233 | #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r)) | 233 | #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r)) |
234 | #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) | 234 | #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) |
235 | #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) | 235 | #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) |
@@ -313,4 +313,6 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, | |||
313 | ring->count_dw -= count_dw; | 313 | ring->count_dw -= count_dw; |
314 | } | 314 | } |
315 | 315 | ||
316 | int amdgpu_ring_test_helper(struct amdgpu_ring *ring); | ||
317 | |||
316 | #endif | 318 | #endif |
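[Editor's note] Since emit_ib now receives the whole job, backends derive the vmid themselves (typically via AMDGPU_JOB_GET_VMID) instead of taking it as a parameter. A hedged sketch of a callback matching the new signature; the body is illustrative, not any specific ASIC's packet format:

    static void example_emit_ib(struct amdgpu_ring *ring,
                                struct amdgpu_job *job,
                                struct amdgpu_ib *ib,
                                bool ctx_switch)
    {
            unsigned int vmid = AMDGPU_JOB_GET_VMID(job);

            /* ... emit an INDIRECT_BUFFER packet from ib->gpu_addr,
             * ib->length_dw and vmid ... */
    }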
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c new file mode 100644 index 000000000000..c8793e6cc3c5 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | |||
@@ -0,0 +1,282 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * Copyright 2009 Jerome Glisse. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the "Software"), | ||
8 | * to deal in the Software without restriction, including without limitation | ||
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
11 | * Software is furnished to do so, subject to the following conditions: | ||
12 | * | ||
13 | * The above copyright notice and this permission notice shall be included in | ||
14 | * all copies or substantial portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | */ | ||
25 | #include <linux/firmware.h> | ||
26 | #include "amdgpu.h" | ||
27 | #include "amdgpu_gfx.h" | ||
28 | #include "amdgpu_rlc.h" | ||
29 | |||
30 | /** | ||
31 | * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode | ||
32 | * | ||
33 | * @adev: amdgpu_device pointer | ||
34 | * | ||
35 | * Put the RLC into safe mode if it is enabled and not already in safe mode. | ||
36 | */ | ||
37 | void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev) | ||
38 | { | ||
39 | if (adev->gfx.rlc.in_safe_mode) | ||
40 | return; | ||
41 | |||
42 | /* if RLC is not enabled, do nothing */ | ||
43 | if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev)) | ||
44 | return; | ||
45 | |||
46 | if (adev->cg_flags & | ||
47 | (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG | | ||
48 | AMD_CG_SUPPORT_GFX_3D_CGCG)) { | ||
49 | adev->gfx.rlc.funcs->set_safe_mode(adev); | ||
50 | adev->gfx.rlc.in_safe_mode = true; | ||
51 | } | ||
52 | } | ||
53 | |||
54 | /** | ||
55 | * amdgpu_gfx_rlc_exit_safe_mode - Take RLC out of safe mode | ||
56 | * | ||
57 | * @adev: amdgpu_device pointer | ||
58 | * | ||
59 | * Take the RLC out of safe mode if it is enabled and currently in safe mode. | ||
60 | */ | ||
61 | void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev) | ||
62 | { | ||
63 | if (!(adev->gfx.rlc.in_safe_mode)) | ||
64 | return; | ||
65 | |||
66 | /* if RLC is not enabled, do nothing */ | ||
67 | if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev)) | ||
68 | return; | ||
69 | |||
70 | if (adev->cg_flags & | ||
71 | (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG | | ||
72 | AMD_CG_SUPPORT_GFX_3D_CGCG)) { | ||
73 | adev->gfx.rlc.funcs->unset_safe_mode(adev); | ||
74 | adev->gfx.rlc.in_safe_mode = false; | ||
75 | } | ||
76 | } | ||
77 | |||
78 | /** | ||
79 | * amdgpu_gfx_rlc_init_sr - Init save restore block | ||
80 | * | ||
81 | * @adev: amdgpu_device pointer | ||
82 | * @dws: size of the save/restore block, in dwords | ||
83 | * | ||
84 | * Allocate and initialize the RLC save/restore block. | ||
85 | * Returns 0 on success or a negative error code if allocation failed. | ||
86 | */ | ||
87 | int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws) | ||
88 | { | ||
89 | const u32 *src_ptr; | ||
90 | volatile u32 *dst_ptr; | ||
91 | u32 i; | ||
92 | int r; | ||
93 | |||
94 | /* allocate save restore block */ | ||
95 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, | ||
96 | AMDGPU_GEM_DOMAIN_VRAM, | ||
97 | &adev->gfx.rlc.save_restore_obj, | ||
98 | &adev->gfx.rlc.save_restore_gpu_addr, | ||
99 | (void **)&adev->gfx.rlc.sr_ptr); | ||
100 | if (r) { | ||
101 | dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); | ||
102 | amdgpu_gfx_rlc_fini(adev); | ||
103 | return r; | ||
104 | } | ||
105 | |||
106 | /* write the sr buffer */ | ||
107 | src_ptr = adev->gfx.rlc.reg_list; | ||
108 | dst_ptr = adev->gfx.rlc.sr_ptr; | ||
109 | for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) | ||
110 | dst_ptr[i] = cpu_to_le32(src_ptr[i]); | ||
111 | amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj); | ||
112 | amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); | ||
113 | |||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | /** | ||
118 | * amdgpu_gfx_rlc_init_csb - Init clear state block | ||
119 | * | ||
120 | * @adev: amdgpu_device pointer | ||
121 | * | ||
122 | * Allocate and initialize the RLC clear state block. | ||
123 | * Returns 0 on success or a negative error code if allocation failed. | ||
124 | */ | ||
125 | int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev) | ||
126 | { | ||
127 | volatile u32 *dst_ptr; | ||
128 | u32 dws; | ||
129 | int r; | ||
130 | |||
131 | /* allocate clear state block */ | ||
132 | adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev); | ||
133 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, | ||
134 | AMDGPU_GEM_DOMAIN_VRAM, | ||
135 | &adev->gfx.rlc.clear_state_obj, | ||
136 | &adev->gfx.rlc.clear_state_gpu_addr, | ||
137 | (void **)&adev->gfx.rlc.cs_ptr); | ||
138 | if (r) { | ||
139 | dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r); | ||
140 | amdgpu_gfx_rlc_fini(adev); | ||
141 | return r; | ||
142 | } | ||
143 | |||
144 | /* set up the cs buffer */ | ||
145 | dst_ptr = adev->gfx.rlc.cs_ptr; | ||
146 | adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr); | ||
147 | amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); | ||
148 | amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); | ||
149 | amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); | ||
150 | |||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | /** | ||
155 | * amdgpu_gfx_rlc_init_cpt - Init cp table | ||
156 | * | ||
157 | * @adev: amdgpu_device pointer | ||
158 | * | ||
159 | * Allocate and initialize the RLC CP table. | ||
160 | * Returns 0 on success or a negative error code if allocation failed. | ||
161 | */ | ||
162 | int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev) | ||
163 | { | ||
164 | int r; | ||
165 | |||
166 | r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, | ||
167 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, | ||
168 | &adev->gfx.rlc.cp_table_obj, | ||
169 | &adev->gfx.rlc.cp_table_gpu_addr, | ||
170 | (void **)&adev->gfx.rlc.cp_table_ptr); | ||
171 | if (r) { | ||
172 | dev_err(adev->dev, "(%d) failed to create cp table bo\n", r); | ||
173 | amdgpu_gfx_rlc_fini(adev); | ||
174 | return r; | ||
175 | } | ||
176 | |||
177 | /* set up the cp table */ | ||
178 | amdgpu_gfx_rlc_setup_cp_table(adev); | ||
179 | amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); | ||
180 | amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); | ||
181 | |||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | /** | ||
186 | * amdgpu_gfx_rlc_setup_cp_table - set up the CP table buffer | ||
187 | * | ||
188 | * @adev: amdgpu_device pointer | ||
189 | * | ||
190 | * Write the CP firmware jump tables into the CP table buffer. | ||
191 | */ | ||
192 | void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev) | ||
193 | { | ||
194 | const __le32 *fw_data; | ||
195 | volatile u32 *dst_ptr; | ||
196 | int me, i, max_me; | ||
197 | u32 bo_offset = 0; | ||
198 | u32 table_offset, table_size; | ||
199 | |||
200 | max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev); | ||
201 | |||
202 | /* write the cp table buffer */ | ||
203 | dst_ptr = adev->gfx.rlc.cp_table_ptr; | ||
204 | for (me = 0; me < max_me; me++) { | ||
205 | if (me == 0) { | ||
206 | const struct gfx_firmware_header_v1_0 *hdr = | ||
207 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; | ||
208 | fw_data = (const __le32 *) | ||
209 | (adev->gfx.ce_fw->data + | ||
210 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
211 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
212 | table_size = le32_to_cpu(hdr->jt_size); | ||
213 | } else if (me == 1) { | ||
214 | const struct gfx_firmware_header_v1_0 *hdr = | ||
215 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; | ||
216 | fw_data = (const __le32 *) | ||
217 | (adev->gfx.pfp_fw->data + | ||
218 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
219 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
220 | table_size = le32_to_cpu(hdr->jt_size); | ||
221 | } else if (me == 2) { | ||
222 | const struct gfx_firmware_header_v1_0 *hdr = | ||
223 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; | ||
224 | fw_data = (const __le32 *) | ||
225 | (adev->gfx.me_fw->data + | ||
226 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
227 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
228 | table_size = le32_to_cpu(hdr->jt_size); | ||
229 | } else if (me == 3) { | ||
230 | const struct gfx_firmware_header_v1_0 *hdr = | ||
231 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; | ||
232 | fw_data = (const __le32 *) | ||
233 | (adev->gfx.mec_fw->data + | ||
234 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
235 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
236 | table_size = le32_to_cpu(hdr->jt_size); | ||
237 | } else if (me == 4) { | ||
238 | const struct gfx_firmware_header_v1_0 *hdr = | ||
239 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; | ||
240 | fw_data = (const __le32 *) | ||
241 | (adev->gfx.mec2_fw->data + | ||
242 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
243 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
244 | table_size = le32_to_cpu(hdr->jt_size); | ||
245 | } | ||
246 | |||
247 | for (i = 0; i < table_size; i ++) { | ||
248 | dst_ptr[bo_offset + i] = | ||
249 | cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); | ||
250 | } | ||
251 | |||
252 | bo_offset += table_size; | ||
253 | } | ||
254 | } | ||
255 | |||
256 | /** | ||
257 | * amdgpu_gfx_rlc_fini - Free the BOs used by the RLC | ||
258 | * | ||
259 | * @adev: amdgpu_device pointer | ||
260 | * | ||
261 | * Free the three BOs used for the RLC save/restore block, clear state | ||
262 | * block and CP (jump table) block. | ||
263 | */ | ||
264 | void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev) | ||
265 | { | ||
266 | /* save restore block */ | ||
267 | if (adev->gfx.rlc.save_restore_obj) { | ||
268 | amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, | ||
269 | &adev->gfx.rlc.save_restore_gpu_addr, | ||
270 | (void **)&adev->gfx.rlc.sr_ptr); | ||
271 | } | ||
272 | |||
273 | /* clear state block */ | ||
274 | amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, | ||
275 | &adev->gfx.rlc.clear_state_gpu_addr, | ||
276 | (void **)&adev->gfx.rlc.cs_ptr); | ||
277 | |||
278 | /* jump table block */ | ||
279 | amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, | ||
280 | &adev->gfx.rlc.cp_table_gpu_addr, | ||
281 | (void **)&adev->gfx.rlc.cp_table_ptr); | ||
282 | } | ||
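[Editor's note] The new file centralizes RLC buffer management; an IP block is expected to point adev->gfx.rlc.funcs at its ops table so the shared helpers can call back for ASIC specifics. A hedged wiring sketch; the gfx_v9_0-style callback names are assumptions, not taken from this diff:

    static const struct amdgpu_rlc_funcs example_rlc_funcs = {
            .is_rlc_enabled  = gfx_v9_0_is_rlc_enabled,   /* assumed callbacks */
            .set_safe_mode   = gfx_v9_0_set_safe_mode,
            .unset_safe_mode = gfx_v9_0_unset_safe_mode,
            .get_csb_size    = gfx_v9_0_get_csb_size,
            .get_csb_buffer  = gfx_v9_0_get_csb_buffer,
    };

    static int example_rlc_sw_init(struct amdgpu_device *adev)
    {
            adev->gfx.rlc.funcs = &example_rlc_funcs;

            /* the helper calls back into get_csb_size/get_csb_buffer */
            return amdgpu_gfx_rlc_init_csb(adev);
    }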
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h new file mode 100644 index 000000000000..49a8ab52113b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | |||
@@ -0,0 +1,98 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef __AMDGPU_RLC_H__ | ||
25 | #define __AMDGPU_RLC_H__ | ||
26 | |||
27 | #include "clearstate_defs.h" | ||
28 | |||
29 | struct amdgpu_rlc_funcs { | ||
30 | bool (*is_rlc_enabled)(struct amdgpu_device *adev); | ||
31 | void (*set_safe_mode)(struct amdgpu_device *adev); | ||
32 | void (*unset_safe_mode)(struct amdgpu_device *adev); | ||
33 | int (*init)(struct amdgpu_device *adev); | ||
34 | u32 (*get_csb_size)(struct amdgpu_device *adev); | ||
35 | void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer); | ||
36 | int (*get_cp_table_num)(struct amdgpu_device *adev); | ||
37 | int (*resume)(struct amdgpu_device *adev); | ||
38 | void (*stop)(struct amdgpu_device *adev); | ||
39 | void (*reset)(struct amdgpu_device *adev); | ||
40 | void (*start)(struct amdgpu_device *adev); | ||
41 | }; | ||
42 | |||
43 | struct amdgpu_rlc { | ||
44 | /* for power gating */ | ||
45 | struct amdgpu_bo *save_restore_obj; | ||
46 | uint64_t save_restore_gpu_addr; | ||
47 | volatile uint32_t *sr_ptr; | ||
48 | const u32 *reg_list; | ||
49 | u32 reg_list_size; | ||
50 | /* for clear state */ | ||
51 | struct amdgpu_bo *clear_state_obj; | ||
52 | uint64_t clear_state_gpu_addr; | ||
53 | volatile uint32_t *cs_ptr; | ||
54 | const struct cs_section_def *cs_data; | ||
55 | u32 clear_state_size; | ||
56 | /* for cp tables */ | ||
57 | struct amdgpu_bo *cp_table_obj; | ||
58 | uint64_t cp_table_gpu_addr; | ||
59 | volatile uint32_t *cp_table_ptr; | ||
60 | u32 cp_table_size; | ||
61 | |||
62 | /* safe mode for updating CG/PG state */ | ||
63 | bool in_safe_mode; | ||
64 | const struct amdgpu_rlc_funcs *funcs; | ||
65 | |||
66 | /* for firmware data */ | ||
67 | u32 save_and_restore_offset; | ||
68 | u32 clear_state_descriptor_offset; | ||
69 | u32 avail_scratch_ram_locations; | ||
70 | u32 reg_restore_list_size; | ||
71 | u32 reg_list_format_start; | ||
72 | u32 reg_list_format_separate_start; | ||
73 | u32 starting_offsets_start; | ||
74 | u32 reg_list_format_size_bytes; | ||
75 | u32 reg_list_size_bytes; | ||
76 | u32 reg_list_format_direct_reg_list_length; | ||
77 | u32 save_restore_list_cntl_size_bytes; | ||
78 | u32 save_restore_list_gpm_size_bytes; | ||
79 | u32 save_restore_list_srm_size_bytes; | ||
80 | |||
81 | u32 *register_list_format; | ||
82 | u32 *register_restore; | ||
83 | u8 *save_restore_list_cntl; | ||
84 | u8 *save_restore_list_gpm; | ||
85 | u8 *save_restore_list_srm; | ||
86 | |||
87 | bool is_rlc_v2_1; | ||
88 | }; | ||
89 | |||
90 | void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev); | ||
91 | void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev); | ||
92 | int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws); | ||
93 | int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev); | ||
94 | int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev); | ||
95 | void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev); | ||
96 | void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev); | ||
97 | |||
98 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index bc9244b429ef..115bb0c99b0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | |||
@@ -28,17 +28,31 @@ | |||
28 | * GPU SDMA IP block helpers function. | 28 | * GPU SDMA IP block helpers function. |
29 | */ | 29 | */ |
30 | 30 | ||
31 | struct amdgpu_sdma_instance * amdgpu_get_sdma_instance(struct amdgpu_ring *ring) | 31 | struct amdgpu_sdma_instance *amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring) |
32 | { | 32 | { |
33 | struct amdgpu_device *adev = ring->adev; | 33 | struct amdgpu_device *adev = ring->adev; |
34 | int i; | 34 | int i; |
35 | 35 | ||
36 | for (i = 0; i < adev->sdma.num_instances; i++) | 36 | for (i = 0; i < adev->sdma.num_instances; i++) |
37 | if (&adev->sdma.instance[i].ring == ring) | 37 | if (ring == &adev->sdma.instance[i].ring || |
38 | break; | 38 | ring == &adev->sdma.instance[i].page) |
39 | return &adev->sdma.instance[i]; | ||
39 | 40 | ||
40 | if (i < AMDGPU_MAX_SDMA_INSTANCES) | 41 | return NULL; |
41 | return &adev->sdma.instance[i]; | 42 | } |
42 | else | 43 | |
43 | return NULL; | 44 | int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index) |
45 | { | ||
46 | struct amdgpu_device *adev = ring->adev; | ||
47 | int i; | ||
48 | |||
49 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
50 | if (ring == &adev->sdma.instance[i].ring || | ||
51 | ring == &adev->sdma.instance[i].page) { | ||
52 | *index = i; | ||
53 | return 0; | ||
54 | } | ||
55 | } | ||
56 | |||
57 | return -EINVAL; | ||
44 | } | 58 | } |
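
With page queues added, a ring can no longer be assumed to be instance[i].ring, so both lookups above also check instance[i].page. A hedged usage sketch for the new index helper (the caller and its messages are illustrative, not from this patch):

uint32_t instance;

if (amdgpu_sdma_get_index_from_ring(ring, &instance)) {
	DRM_ERROR("ring is not an SDMA ring\n");	/* illustrative */
	return -EINVAL;
}
/* instance now indexes adev->sdma.instance[] for either queue type */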
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index 500113ec65ca..16b1a6ae5ba6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | |||
@@ -41,6 +41,7 @@ struct amdgpu_sdma_instance { | |||
41 | uint32_t feature_version; | 41 | uint32_t feature_version; |
42 | 42 | ||
43 | struct amdgpu_ring ring; | 43 | struct amdgpu_ring ring; |
44 | struct amdgpu_ring page; | ||
44 | bool burst_nop; | 45 | bool burst_nop; |
45 | }; | 46 | }; |
46 | 47 | ||
@@ -50,6 +51,7 @@ struct amdgpu_sdma { | |||
50 | struct amdgpu_irq_src illegal_inst_irq; | 51 | struct amdgpu_irq_src illegal_inst_irq; |
51 | int num_instances; | 52 | int num_instances; |
52 | uint32_t srbm_soft_reset; | 53 | uint32_t srbm_soft_reset; |
54 | bool has_page_queue; | ||
53 | }; | 55 | }; |
54 | 56 | ||
55 | /* | 57 | /* |
@@ -92,6 +94,7 @@ struct amdgpu_buffer_funcs { | |||
92 | #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) | 94 | #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) |
93 | 95 | ||
94 | struct amdgpu_sdma_instance * | 96 | struct amdgpu_sdma_instance * |
95 | amdgpu_get_sdma_instance(struct amdgpu_ring *ring); | 97 | amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring); |
98 | int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index); | ||
96 | 99 | ||
97 | #endif | 100 | #endif |
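
The new has_page_queue flag lets each ASIC advertise whether the extra page queue exists, and setup code is expected to gate page-ring initialization on it. A minimal sketch (the actual setup code is not in this hunk):

if (adev->sdma.has_page_queue) {
	struct amdgpu_ring *page = &adev->sdma.instance[i].page;

	/* initialize and register the page queue like the main ring */
}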
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index e9bf70e2ac51..626abca770a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | |||
@@ -218,6 +218,7 @@ TRACE_EVENT(amdgpu_vm_grab_id, | |||
218 | TP_ARGS(vm, ring, job), | 218 | TP_ARGS(vm, ring, job), |
219 | TP_STRUCT__entry( | 219 | TP_STRUCT__entry( |
220 | __field(u32, pasid) | 220 | __field(u32, pasid) |
221 | __string(ring, ring->name) | ||
221 | __field(u32, ring) | 222 | __field(u32, ring) |
222 | __field(u32, vmid) | 223 | __field(u32, vmid) |
223 | __field(u32, vm_hub) | 224 | __field(u32, vm_hub) |
@@ -227,14 +228,14 @@ TRACE_EVENT(amdgpu_vm_grab_id, | |||
227 | 228 | ||
228 | TP_fast_assign( | 229 | TP_fast_assign( |
229 | __entry->pasid = vm->pasid; | 230 | __entry->pasid = vm->pasid; |
230 | __entry->ring = ring->idx; | 231 | __assign_str(ring, ring->name) |
231 | __entry->vmid = job->vmid; | 232 | __entry->vmid = job->vmid; |
232 | __entry->vm_hub = ring->funcs->vmhub, | 233 | __entry->vm_hub = ring->funcs->vmhub, |
233 | __entry->pd_addr = job->vm_pd_addr; | 234 | __entry->pd_addr = job->vm_pd_addr; |
234 | __entry->needs_flush = job->vm_needs_flush; | 235 | __entry->needs_flush = job->vm_needs_flush; |
235 | ), | 236 | ), |
236 | TP_printk("pasid=%d, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u", | 237 | TP_printk("pasid=%d, ring=%s, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u", |
237 | __entry->pasid, __entry->ring, __entry->vmid, | 238 | __entry->pasid, __get_str(ring), __entry->vmid, |
238 | __entry->vm_hub, __entry->pd_addr, __entry->needs_flush) | 239 | __entry->vm_hub, __entry->pd_addr, __entry->needs_flush) |
239 | ); | 240 | ); |
240 | 241 | ||
@@ -366,20 +367,20 @@ TRACE_EVENT(amdgpu_vm_flush, | |||
366 | uint64_t pd_addr), | 367 | uint64_t pd_addr), |
367 | TP_ARGS(ring, vmid, pd_addr), | 368 | TP_ARGS(ring, vmid, pd_addr), |
368 | TP_STRUCT__entry( | 369 | TP_STRUCT__entry( |
369 | __field(u32, ring) | 370 | __string(ring, ring->name) |
370 | __field(u32, vmid) | 371 | __field(u32, vmid) |
371 | __field(u32, vm_hub) | 372 | __field(u32, vm_hub) |
372 | __field(u64, pd_addr) | 373 | __field(u64, pd_addr) |
373 | ), | 374 | ), |
374 | 375 | ||
375 | TP_fast_assign( | 376 | TP_fast_assign( |
376 | __entry->ring = ring->idx; | 377 | __assign_str(ring, ring->name) |
377 | __entry->vmid = vmid; | 378 | __entry->vmid = vmid; |
378 | __entry->vm_hub = ring->funcs->vmhub; | 379 | __entry->vm_hub = ring->funcs->vmhub; |
379 | __entry->pd_addr = pd_addr; | 380 | __entry->pd_addr = pd_addr; |
380 | ), | 381 | ), |
381 | TP_printk("ring=%u, id=%u, hub=%u, pd_addr=%010Lx", | 382 | TP_printk("ring=%s, id=%u, hub=%u, pd_addr=%010Lx", |
382 | __entry->ring, __entry->vmid, | 383 | __get_str(ring), __entry->vmid, |
383 | __entry->vm_hub,__entry->pd_addr) | 384 | __entry->vm_hub,__entry->pd_addr) |
384 | ); | 385 | ); |
385 | 386 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index a44fc12ae1f9..c91ec3101d00 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -61,100 +61,6 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo, | |||
61 | static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); | 61 | static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); |
62 | static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); | 62 | static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); |
63 | 63 | ||
64 | /* | ||
65 | * Global memory. | ||
66 | */ | ||
67 | |||
68 | /** | ||
69 | * amdgpu_ttm_mem_global_init - Initialize and acquire reference to | ||
70 | * memory object | ||
71 | * | ||
72 | * @ref: Object for initialization. | ||
73 | * | ||
74 | * This is called by drm_global_item_ref() when an object is being | ||
75 | * initialized. | ||
76 | */ | ||
77 | static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref) | ||
78 | { | ||
79 | return ttm_mem_global_init(ref->object); | ||
80 | } | ||
81 | |||
82 | /** | ||
83 | * amdgpu_ttm_mem_global_release - Drop reference to a memory object | ||
84 | * | ||
85 | * @ref: Object being removed | ||
86 | * | ||
87 | * This is called by drm_global_item_unref() when an object is being | ||
88 | * released. | ||
89 | */ | ||
90 | static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref) | ||
91 | { | ||
92 | ttm_mem_global_release(ref->object); | ||
93 | } | ||
94 | |||
95 | /** | ||
96 | * amdgpu_ttm_global_init - Initialize global TTM memory reference structures. | ||
97 | * | ||
98 | * @adev: AMDGPU device for which the global structures need to be registered. | ||
99 | * | ||
100 | * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init() | ||
101 | * during bring up. | ||
102 | */ | ||
103 | static int amdgpu_ttm_global_init(struct amdgpu_device *adev) | ||
104 | { | ||
105 | struct drm_global_reference *global_ref; | ||
106 | int r; | ||
107 | |||
108 | /* ensure reference is false in case init fails */ | ||
109 | adev->mman.mem_global_referenced = false; | ||
110 | |||
111 | global_ref = &adev->mman.mem_global_ref; | ||
112 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; | ||
113 | global_ref->size = sizeof(struct ttm_mem_global); | ||
114 | global_ref->init = &amdgpu_ttm_mem_global_init; | ||
115 | global_ref->release = &amdgpu_ttm_mem_global_release; | ||
116 | r = drm_global_item_ref(global_ref); | ||
117 | if (r) { | ||
118 | DRM_ERROR("Failed setting up TTM memory accounting " | ||
119 | "subsystem.\n"); | ||
120 | goto error_mem; | ||
121 | } | ||
122 | |||
123 | adev->mman.bo_global_ref.mem_glob = | ||
124 | adev->mman.mem_global_ref.object; | ||
125 | global_ref = &adev->mman.bo_global_ref.ref; | ||
126 | global_ref->global_type = DRM_GLOBAL_TTM_BO; | ||
127 | global_ref->size = sizeof(struct ttm_bo_global); | ||
128 | global_ref->init = &ttm_bo_global_init; | ||
129 | global_ref->release = &ttm_bo_global_release; | ||
130 | r = drm_global_item_ref(global_ref); | ||
131 | if (r) { | ||
132 | DRM_ERROR("Failed setting up TTM BO subsystem.\n"); | ||
133 | goto error_bo; | ||
134 | } | ||
135 | |||
136 | mutex_init(&adev->mman.gtt_window_lock); | ||
137 | |||
138 | adev->mman.mem_global_referenced = true; | ||
139 | |||
140 | return 0; | ||
141 | |||
142 | error_bo: | ||
143 | drm_global_item_unref(&adev->mman.mem_global_ref); | ||
144 | error_mem: | ||
145 | return r; | ||
146 | } | ||
147 | |||
148 | static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) | ||
149 | { | ||
150 | if (adev->mman.mem_global_referenced) { | ||
151 | mutex_destroy(&adev->mman.gtt_window_lock); | ||
152 | drm_global_item_unref(&adev->mman.bo_global_ref.ref); | ||
153 | drm_global_item_unref(&adev->mman.mem_global_ref); | ||
154 | adev->mman.mem_global_referenced = false; | ||
155 | } | ||
156 | } | ||
157 | |||
158 | static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) | 64 | static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) |
159 | { | 65 | { |
160 | return 0; | 66 | return 0; |
@@ -1758,14 +1664,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) | |||
1758 | int r; | 1664 | int r; |
1759 | u64 vis_vram_limit; | 1665 | u64 vis_vram_limit; |
1760 | 1666 | ||
1761 | /* initialize global references for vram/gtt */ | 1667 | mutex_init(&adev->mman.gtt_window_lock); |
1762 | r = amdgpu_ttm_global_init(adev); | 1668 | |
1763 | if (r) { | ||
1764 | return r; | ||
1765 | } | ||
1766 | /* No other users of address space so set it to 0 */ | 1669 | /* No other users of address space so set it to 0 */
1767 | r = ttm_bo_device_init(&adev->mman.bdev, | 1670 | r = ttm_bo_device_init(&adev->mman.bdev, |
1768 | adev->mman.bo_global_ref.ref.object, | ||
1769 | &amdgpu_bo_driver, | 1671 | &amdgpu_bo_driver, |
1770 | adev->ddev->anon_inode->i_mapping, | 1672 | adev->ddev->anon_inode->i_mapping, |
1771 | DRM_FILE_PAGE_OFFSET, | 1673 | DRM_FILE_PAGE_OFFSET, |
@@ -1922,7 +1824,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) | |||
1922 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS); | 1824 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS); |
1923 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA); | 1825 | ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA); |
1924 | ttm_bo_device_release(&adev->mman.bdev); | 1826 | ttm_bo_device_release(&adev->mman.bdev); |
1925 | amdgpu_ttm_global_fini(adev); | ||
1926 | adev->mman.initialized = false; | 1827 | adev->mman.initialized = false; |
1927 | DRM_INFO("amdgpu: ttm finalized\n"); | 1828 | DRM_INFO("amdgpu: ttm finalized\n"); |
1928 | } | 1829 | } |
@@ -2069,7 +1970,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, | |||
2069 | unsigned i; | 1970 | unsigned i; |
2070 | int r; | 1971 | int r; |
2071 | 1972 | ||
2072 | if (direct_submit && !ring->ready) { | 1973 | if (direct_submit && !ring->sched.ready) { |
2073 | DRM_ERROR("Trying to move memory with ring turned off.\n"); | 1974 | DRM_ERROR("Trying to move memory with ring turned off.\n"); |
2074 | return -EINVAL; | 1975 | return -EINVAL; |
2075 | } | 1976 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index fe8f276e9811..b5b2d101f7db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | |||
@@ -39,8 +39,6 @@ | |||
39 | #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2 | 39 | #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2 |
40 | 40 | ||
41 | struct amdgpu_mman { | 41 | struct amdgpu_mman { |
42 | struct ttm_bo_global_ref bo_global_ref; | ||
43 | struct drm_global_reference mem_global_ref; | ||
44 | struct ttm_bo_device bdev; | 42 | struct ttm_bo_device bdev; |
45 | bool mem_global_referenced; | 43 | bool mem_global_referenced; |
46 | bool initialized; | 44 | bool initialized; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index aa6641b944a0..7ac25a1c7853 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | |||
@@ -58,6 +58,17 @@ struct psp_firmware_header_v1_0 { | |||
58 | }; | 58 | }; |
59 | 59 | ||
60 | /* version_major=1, version_minor=0 */ | 60 | /* version_major=1, version_minor=0 */ |
61 | struct ta_firmware_header_v1_0 { | ||
62 | struct common_firmware_header header; | ||
63 | uint32_t ta_xgmi_ucode_version; | ||
64 | uint32_t ta_xgmi_offset_bytes; | ||
65 | uint32_t ta_xgmi_size_bytes; | ||
66 | uint32_t ta_ras_ucode_version; | ||
67 | uint32_t ta_ras_offset_bytes; | ||
68 | uint32_t ta_ras_size_bytes; | ||
69 | }; | ||
70 | |||
71 | /* version_major=1, version_minor=0 */ | ||
61 | struct gfx_firmware_header_v1_0 { | 72 | struct gfx_firmware_header_v1_0 { |
62 | struct common_firmware_header header; | 73 | struct common_firmware_header header; |
63 | uint32_t ucode_feature_version; | 74 | uint32_t ucode_feature_version; |
@@ -170,6 +181,7 @@ union amdgpu_firmware_header { | |||
170 | struct mc_firmware_header_v1_0 mc; | 181 | struct mc_firmware_header_v1_0 mc; |
171 | struct smc_firmware_header_v1_0 smc; | 182 | struct smc_firmware_header_v1_0 smc; |
172 | struct psp_firmware_header_v1_0 psp; | 183 | struct psp_firmware_header_v1_0 psp; |
184 | struct ta_firmware_header_v1_0 ta; | ||
173 | struct gfx_firmware_header_v1_0 gfx; | 185 | struct gfx_firmware_header_v1_0 gfx; |
174 | struct rlc_firmware_header_v1_0 rlc; | 186 | struct rlc_firmware_header_v1_0 rlc; |
175 | struct rlc_firmware_header_v2_0 rlc_v2_0; | 187 | struct rlc_firmware_header_v2_0 rlc_v2_0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index e5a6db6beab7..69896f451e8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
@@ -1243,30 +1243,20 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
1243 | { | 1243 | { |
1244 | struct dma_fence *fence; | 1244 | struct dma_fence *fence; |
1245 | long r; | 1245 | long r; |
1246 | uint32_t ip_instance = ring->me; | ||
1247 | 1246 | ||
1248 | r = amdgpu_uvd_get_create_msg(ring, 1, NULL); | 1247 | r = amdgpu_uvd_get_create_msg(ring, 1, NULL); |
1249 | if (r) { | 1248 | if (r) |
1250 | DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r); | ||
1251 | goto error; | 1249 | goto error; |
1252 | } | ||
1253 | 1250 | ||
1254 | r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); | 1251 | r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); |
1255 | if (r) { | 1252 | if (r) |
1256 | DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r); | ||
1257 | goto error; | 1253 | goto error; |
1258 | } | ||
1259 | 1254 | ||
1260 | r = dma_fence_wait_timeout(fence, false, timeout); | 1255 | r = dma_fence_wait_timeout(fence, false, timeout); |
1261 | if (r == 0) { | 1256 | if (r == 0) |
1262 | DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance); | ||
1263 | r = -ETIMEDOUT; | 1257 | r = -ETIMEDOUT; |
1264 | } else if (r < 0) { | 1258 | else if (r > 0) |
1265 | DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r); | ||
1266 | } else { | ||
1267 | DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx); | ||
1268 | r = 0; | 1259 | r = 0; |
1269 | } | ||
1270 | 1260 | ||
1271 | dma_fence_put(fence); | 1261 | dma_fence_put(fence); |
1272 | 1262 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 5f3f54073818..98a1b2ce2b9d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | |||
@@ -1032,8 +1032,10 @@ out: | |||
1032 | * @ib: the IB to execute | 1032 | * @ib: the IB to execute |
1033 | * | 1033 | * |
1034 | */ | 1034 | */ |
1035 | void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, | 1035 | void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, |
1036 | unsigned vmid, bool ctx_switch) | 1036 | struct amdgpu_job *job, |
1037 | struct amdgpu_ib *ib, | ||
1038 | bool ctx_switch) | ||
1037 | { | 1039 | { |
1038 | amdgpu_ring_write(ring, VCE_CMD_IB); | 1040 | amdgpu_ring_write(ring, VCE_CMD_IB); |
1039 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); | 1041 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); |
@@ -1079,11 +1081,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) | |||
1079 | return 0; | 1081 | return 0; |
1080 | 1082 | ||
1081 | r = amdgpu_ring_alloc(ring, 16); | 1083 | r = amdgpu_ring_alloc(ring, 16); |
1082 | if (r) { | 1084 | if (r) |
1083 | DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n", | ||
1084 | ring->idx, r); | ||
1085 | return r; | 1085 | return r; |
1086 | } | 1086 | |
1087 | amdgpu_ring_write(ring, VCE_CMD_END); | 1087 | amdgpu_ring_write(ring, VCE_CMD_END); |
1088 | amdgpu_ring_commit(ring); | 1088 | amdgpu_ring_commit(ring); |
1089 | 1089 | ||
@@ -1093,14 +1093,8 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) | |||
1093 | DRM_UDELAY(1); | 1093 | DRM_UDELAY(1); |
1094 | } | 1094 | } |
1095 | 1095 | ||
1096 | if (i < timeout) { | 1096 | if (i >= timeout) |
1097 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", | ||
1098 | ring->idx, i); | ||
1099 | } else { | ||
1100 | DRM_ERROR("amdgpu: ring %d test failed\n", | ||
1101 | ring->idx); | ||
1102 | r = -ETIMEDOUT; | 1097 | r = -ETIMEDOUT; |
1103 | } | ||
1104 | 1098 | ||
1105 | return r; | 1099 | return r; |
1106 | } | 1100 | } |
@@ -1121,27 +1115,19 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
1121 | return 0; | 1115 | return 0; |
1122 | 1116 | ||
1123 | r = amdgpu_vce_get_create_msg(ring, 1, NULL); | 1117 | r = amdgpu_vce_get_create_msg(ring, 1, NULL); |
1124 | if (r) { | 1118 | if (r) |
1125 | DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); | ||
1126 | goto error; | 1119 | goto error; |
1127 | } | ||
1128 | 1120 | ||
1129 | r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence); | 1121 | r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence); |
1130 | if (r) { | 1122 | if (r) |
1131 | DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); | ||
1132 | goto error; | 1123 | goto error; |
1133 | } | ||
1134 | 1124 | ||
1135 | r = dma_fence_wait_timeout(fence, false, timeout); | 1125 | r = dma_fence_wait_timeout(fence, false, timeout); |
1136 | if (r == 0) { | 1126 | if (r == 0) |
1137 | DRM_ERROR("amdgpu: IB test timed out.\n"); | ||
1138 | r = -ETIMEDOUT; | 1127 | r = -ETIMEDOUT; |
1139 | } else if (r < 0) { | 1128 | else if (r > 0) |
1140 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); | ||
1141 | } else { | ||
1142 | DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); | ||
1143 | r = 0; | 1129 | r = 0; |
1144 | } | 1130 | |
1145 | error: | 1131 | error: |
1146 | dma_fence_put(fence); | 1132 | dma_fence_put(fence); |
1147 | return r; | 1133 | return r; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index a1f209eed4c4..50293652af14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | |||
@@ -65,8 +65,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
65 | void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); | 65 | void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); |
66 | int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); | 66 | int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); |
67 | int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx); | 67 | int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx); |
68 | void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, | 68 | void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, |
69 | unsigned vmid, bool ctx_switch); | 69 | struct amdgpu_ib *ib, bool ctx_switch); |
70 | void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, | 70 | void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, |
71 | unsigned flags); | 71 | unsigned flags); |
72 | int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring); | 72 | int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring); |
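
The emit_ib callbacks now take the whole amdgpu_job instead of a pre-extracted vmid, so implementations that need it fetch the vmid themselves; the cik_sdma and gfx_v6_0 hunks below do this via AMDGPU_JOB_GET_VMID(job). The macro itself is not part of this diff; a plausible definition, assuming it must tolerate a NULL job (e.g. for ring tests submitted without a job):

#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)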
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 27da13df2f11..e2e42e3fbcf3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | |||
@@ -425,11 +425,9 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring) | |||
425 | 425 | ||
426 | WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD); | 426 | WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD); |
427 | r = amdgpu_ring_alloc(ring, 3); | 427 | r = amdgpu_ring_alloc(ring, 3); |
428 | if (r) { | 428 | if (r) |
429 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", | ||
430 | ring->idx, r); | ||
431 | return r; | 429 | return r; |
432 | } | 430 | |
433 | amdgpu_ring_write(ring, | 431 | amdgpu_ring_write(ring, |
434 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0)); | 432 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0)); |
435 | amdgpu_ring_write(ring, 0xDEADBEEF); | 433 | amdgpu_ring_write(ring, 0xDEADBEEF); |
@@ -441,14 +439,9 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring) | |||
441 | DRM_UDELAY(1); | 439 | DRM_UDELAY(1); |
442 | } | 440 | } |
443 | 441 | ||
444 | if (i < adev->usec_timeout) { | 442 | if (i >= adev->usec_timeout) |
445 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", | 443 | r = -ETIMEDOUT; |
446 | ring->idx, i); | 444 | |
447 | } else { | ||
448 | DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", | ||
449 | ring->idx, tmp); | ||
450 | r = -EINVAL; | ||
451 | } | ||
452 | return r; | 445 | return r; |
453 | } | 446 | } |
454 | 447 | ||
@@ -570,30 +563,20 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
570 | long r; | 563 | long r; |
571 | 564 | ||
572 | r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL); | 565 | r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL); |
573 | if (r) { | 566 | if (r) |
574 | DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); | ||
575 | goto error; | 567 | goto error; |
576 | } | ||
577 | 568 | ||
578 | r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence); | 569 | r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence); |
579 | if (r) { | 570 | if (r) |
580 | DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); | ||
581 | goto error; | 571 | goto error; |
582 | } | ||
583 | 572 | ||
584 | r = dma_fence_wait_timeout(fence, false, timeout); | 573 | r = dma_fence_wait_timeout(fence, false, timeout); |
585 | if (r == 0) { | 574 | if (r == 0) |
586 | DRM_ERROR("amdgpu: IB test timed out.\n"); | ||
587 | r = -ETIMEDOUT; | 575 | r = -ETIMEDOUT; |
588 | } else if (r < 0) { | 576 | else if (r > 0) |
589 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); | ||
590 | } else { | ||
591 | DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); | ||
592 | r = 0; | 577 | r = 0; |
593 | } | ||
594 | 578 | ||
595 | dma_fence_put(fence); | 579 | dma_fence_put(fence); |
596 | |||
597 | error: | 580 | error: |
598 | return r; | 581 | return r; |
599 | } | 582 | } |
@@ -606,11 +589,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring) | |||
606 | int r; | 589 | int r; |
607 | 590 | ||
608 | r = amdgpu_ring_alloc(ring, 16); | 591 | r = amdgpu_ring_alloc(ring, 16); |
609 | if (r) { | 592 | if (r) |
610 | DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n", | ||
611 | ring->idx, r); | ||
612 | return r; | 593 | return r; |
613 | } | 594 | |
614 | amdgpu_ring_write(ring, VCN_ENC_CMD_END); | 595 | amdgpu_ring_write(ring, VCN_ENC_CMD_END); |
615 | amdgpu_ring_commit(ring); | 596 | amdgpu_ring_commit(ring); |
616 | 597 | ||
@@ -620,14 +601,8 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring) | |||
620 | DRM_UDELAY(1); | 601 | DRM_UDELAY(1); |
621 | } | 602 | } |
622 | 603 | ||
623 | if (i < adev->usec_timeout) { | 604 | if (i >= adev->usec_timeout) |
624 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", | ||
625 | ring->idx, i); | ||
626 | } else { | ||
627 | DRM_ERROR("amdgpu: ring %d test failed\n", | ||
628 | ring->idx); | ||
629 | r = -ETIMEDOUT; | 605 | r = -ETIMEDOUT; |
630 | } | ||
631 | 606 | ||
632 | return r; | 607 | return r; |
633 | } | 608 | } |
@@ -742,27 +717,19 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
742 | long r; | 717 | long r; |
743 | 718 | ||
744 | r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL); | 719 | r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL); |
745 | if (r) { | 720 | if (r) |
746 | DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); | ||
747 | goto error; | 721 | goto error; |
748 | } | ||
749 | 722 | ||
750 | r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence); | 723 | r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence); |
751 | if (r) { | 724 | if (r) |
752 | DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); | ||
753 | goto error; | 725 | goto error; |
754 | } | ||
755 | 726 | ||
756 | r = dma_fence_wait_timeout(fence, false, timeout); | 727 | r = dma_fence_wait_timeout(fence, false, timeout); |
757 | if (r == 0) { | 728 | if (r == 0) |
758 | DRM_ERROR("amdgpu: IB test timed out.\n"); | ||
759 | r = -ETIMEDOUT; | 729 | r = -ETIMEDOUT; |
760 | } else if (r < 0) { | 730 | else if (r > 0) |
761 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); | ||
762 | } else { | ||
763 | DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); | ||
764 | r = 0; | 731 | r = 0; |
765 | } | 732 | |
766 | error: | 733 | error: |
767 | dma_fence_put(fence); | 734 | dma_fence_put(fence); |
768 | return r; | 735 | return r; |
@@ -778,11 +745,8 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring) | |||
778 | WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD); | 745 | WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD); |
779 | r = amdgpu_ring_alloc(ring, 3); | 746 | r = amdgpu_ring_alloc(ring, 3); |
780 | 747 | ||
781 | if (r) { | 748 | if (r) |
782 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", | ||
783 | ring->idx, r); | ||
784 | return r; | 749 | return r; |
785 | } | ||
786 | 750 | ||
787 | amdgpu_ring_write(ring, | 751 | amdgpu_ring_write(ring, |
788 | PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, 0)); | 752 | PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, 0)); |
@@ -796,14 +760,8 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring) | |||
796 | DRM_UDELAY(1); | 760 | DRM_UDELAY(1); |
797 | } | 761 | } |
798 | 762 | ||
799 | if (i < adev->usec_timeout) { | 763 | if (i >= adev->usec_timeout) |
800 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", | 764 | r = -ETIMEDOUT; |
801 | ring->idx, i); | ||
802 | } else { | ||
803 | DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", | ||
804 | ring->idx, tmp); | ||
805 | r = -EINVAL; | ||
806 | } | ||
807 | 765 | ||
808 | return r; | 766 | return r; |
809 | } | 767 | } |
@@ -856,21 +814,18 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
856 | long r = 0; | 814 | long r = 0; |
857 | 815 | ||
858 | r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence); | 816 | r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence); |
859 | if (r) { | 817 | if (r) |
860 | DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r); | ||
861 | goto error; | 818 | goto error; |
862 | } | ||
863 | 819 | ||
864 | r = dma_fence_wait_timeout(fence, false, timeout); | 820 | r = dma_fence_wait_timeout(fence, false, timeout); |
865 | if (r == 0) { | 821 | if (r == 0) { |
866 | DRM_ERROR("amdgpu: IB test timed out.\n"); | ||
867 | r = -ETIMEDOUT; | 822 | r = -ETIMEDOUT; |
868 | goto error; | 823 | goto error; |
869 | } else if (r < 0) { | 824 | } else if (r < 0) { |
870 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); | ||
871 | goto error; | 825 | goto error; |
872 | } else | 826 | } else { |
873 | r = 0; | 827 | r = 0; |
828 | } | ||
874 | 829 | ||
875 | for (i = 0; i < adev->usec_timeout; i++) { | 830 | for (i = 0; i < adev->usec_timeout; i++) { |
876 | tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9)); | 831 | tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9)); |
@@ -879,15 +834,10 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
879 | DRM_UDELAY(1); | 834 | DRM_UDELAY(1); |
880 | } | 835 | } |
881 | 836 | ||
882 | if (i < adev->usec_timeout) | 837 | if (i >= adev->usec_timeout) |
883 | DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); | 838 | r = -ETIMEDOUT; |
884 | else { | ||
885 | DRM_ERROR("ib test failed (0x%08X)\n", tmp); | ||
886 | r = -EINVAL; | ||
887 | } | ||
888 | 839 | ||
889 | dma_fence_put(fence); | 840 | dma_fence_put(fence); |
890 | |||
891 | error: | 841 | error: |
892 | return r; | 842 | return r; |
893 | } | 843 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index f2f358aa0597..cfee74732edb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | |||
@@ -23,16 +23,6 @@ | |||
23 | 23 | ||
24 | #include "amdgpu.h" | 24 | #include "amdgpu.h" |
25 | 25 | ||
26 | uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev) | ||
27 | { | ||
28 | uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT; | ||
29 | |||
30 | addr -= AMDGPU_VA_RESERVED_SIZE; | ||
31 | addr = amdgpu_gmc_sign_extend(addr); | ||
32 | |||
33 | return addr; | ||
34 | } | ||
35 | |||
36 | bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev) | 26 | bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev) |
37 | { | 27 | { |
38 | /* By now all MMIO pages except mailbox are blocked */ | 28 | /* By now all MMIO pages except mailbox are blocked */ |
@@ -41,88 +31,6 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev) | |||
41 | return RREG32_NO_KIQ(0xc040) == 0xffffffff; | 31 | return RREG32_NO_KIQ(0xc040) == 0xffffffff; |
42 | } | 32 | } |
43 | 33 | ||
44 | int amdgpu_allocate_static_csa(struct amdgpu_device *adev) | ||
45 | { | ||
46 | int r; | ||
47 | void *ptr; | ||
48 | |||
49 | r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE, | ||
50 | AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj, | ||
51 | &adev->virt.csa_vmid0_addr, &ptr); | ||
52 | if (r) | ||
53 | return r; | ||
54 | |||
55 | memset(ptr, 0, AMDGPU_CSA_SIZE); | ||
56 | return 0; | ||
57 | } | ||
58 | |||
59 | void amdgpu_free_static_csa(struct amdgpu_device *adev) { | ||
60 | amdgpu_bo_free_kernel(&adev->virt.csa_obj, | ||
61 | &adev->virt.csa_vmid0_addr, | ||
62 | NULL); | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * amdgpu_map_static_csa should be called during amdgpu_vm_init | ||
67 | * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command | ||
68 | * submission of GFX should use this virtual address within META_DATA init | ||
69 | * package to support SRIOV gfx preemption. | ||
70 | */ | ||
71 | int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, | ||
72 | struct amdgpu_bo_va **bo_va) | ||
73 | { | ||
74 | uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK; | ||
75 | struct ww_acquire_ctx ticket; | ||
76 | struct list_head list; | ||
77 | struct amdgpu_bo_list_entry pd; | ||
78 | struct ttm_validate_buffer csa_tv; | ||
79 | int r; | ||
80 | |||
81 | INIT_LIST_HEAD(&list); | ||
82 | INIT_LIST_HEAD(&csa_tv.head); | ||
83 | csa_tv.bo = &adev->virt.csa_obj->tbo; | ||
84 | csa_tv.shared = true; | ||
85 | |||
86 | list_add(&csa_tv.head, &list); | ||
87 | amdgpu_vm_get_pd_bo(vm, &list, &pd); | ||
88 | |||
89 | r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); | ||
90 | if (r) { | ||
91 | DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r); | ||
92 | return r; | ||
93 | } | ||
94 | |||
95 | *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj); | ||
96 | if (!*bo_va) { | ||
97 | ttm_eu_backoff_reservation(&ticket, &list); | ||
98 | DRM_ERROR("failed to create bo_va for static CSA\n"); | ||
99 | return -ENOMEM; | ||
100 | } | ||
101 | |||
102 | r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr, | ||
103 | AMDGPU_CSA_SIZE); | ||
104 | if (r) { | ||
105 | DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r); | ||
106 | amdgpu_vm_bo_rmv(adev, *bo_va); | ||
107 | ttm_eu_backoff_reservation(&ticket, &list); | ||
108 | return r; | ||
109 | } | ||
110 | |||
111 | r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE, | ||
112 | AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | | ||
113 | AMDGPU_PTE_EXECUTABLE); | ||
114 | |||
115 | if (r) { | ||
116 | DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r); | ||
117 | amdgpu_vm_bo_rmv(adev, *bo_va); | ||
118 | ttm_eu_backoff_reservation(&ticket, &list); | ||
119 | return r; | ||
120 | } | ||
121 | |||
122 | ttm_eu_backoff_reservation(&ticket, &list); | ||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | void amdgpu_virt_init_setting(struct amdgpu_device *adev) | 34 | void amdgpu_virt_init_setting(struct amdgpu_device *adev) |
127 | { | 35 | { |
128 | /* enable virtual display */ | 36 | /* enable virtual display */ |
@@ -162,9 +70,7 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) | |||
162 | if (r < 1 && (adev->in_gpu_reset || in_interrupt())) | 70 | if (r < 1 && (adev->in_gpu_reset || in_interrupt())) |
163 | goto failed_kiq_read; | 71 | goto failed_kiq_read; |
164 | 72 | ||
165 | if (in_interrupt()) | 73 | might_sleep(); |
166 | might_sleep(); | ||
167 | |||
168 | while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { | 74 | while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { |
169 | msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); | 75 | msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); |
170 | r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); | 76 | r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); |
@@ -210,9 +116,7 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) | |||
210 | if (r < 1 && (adev->in_gpu_reset || in_interrupt())) | 116 | if (r < 1 && (adev->in_gpu_reset || in_interrupt())) |
211 | goto failed_kiq_write; | 117 | goto failed_kiq_write; |
212 | 118 | ||
213 | if (in_interrupt()) | 119 | might_sleep(); |
214 | might_sleep(); | ||
215 | |||
216 | while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { | 120 | while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { |
217 | 121 | ||
218 | msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); | 122 | msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); |
@@ -228,6 +132,46 @@ failed_kiq_write: | |||
228 | pr_err("failed to write reg:%x\n", reg); | 132 | pr_err("failed to write reg:%x\n", reg); |
229 | } | 133 | } |
230 | 134 | ||
135 | void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, | ||
136 | uint32_t reg0, uint32_t reg1, | ||
137 | uint32_t ref, uint32_t mask) | ||
138 | { | ||
139 | struct amdgpu_kiq *kiq = &adev->gfx.kiq; | ||
140 | struct amdgpu_ring *ring = &kiq->ring; | ||
141 | signed long r, cnt = 0; | ||
142 | unsigned long flags; | ||
143 | uint32_t seq; | ||
144 | |||
145 | spin_lock_irqsave(&kiq->ring_lock, flags); | ||
146 | amdgpu_ring_alloc(ring, 32); | ||
147 | amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1, | ||
148 | ref, mask); | ||
149 | amdgpu_fence_emit_polling(ring, &seq); | ||
150 | amdgpu_ring_commit(ring); | ||
151 | spin_unlock_irqrestore(&kiq->ring_lock, flags); | ||
152 | |||
153 | r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); | ||
154 | |||
155 | /* don't wait anymore for IRQ context */ | ||
156 | if (r < 1 && in_interrupt()) | ||
157 | goto failed_kiq; | ||
158 | |||
159 | might_sleep(); | ||
160 | while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { | ||
161 | |||
162 | msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); | ||
163 | r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); | ||
164 | } | ||
165 | |||
166 | if (cnt > MAX_KIQ_REG_TRY) | ||
167 | goto failed_kiq; | ||
168 | |||
169 | return; | ||
170 | |||
171 | failed_kiq: | ||
172 | pr_err("failed to write reg %x wait reg %x\n", reg0, reg1); | ||
173 | } | ||
174 | |||
231 | /** | 175 | /** |
232 | * amdgpu_virt_request_full_gpu() - request full gpu access | 176 | * amdgpu_virt_request_full_gpu() - request full gpu access |
233 | * @adev: amdgpu device. | 177 | * @adev: amdgpu device.
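
The new amdgpu_virt_kiq_reg_write_reg_wait() packs a register write plus a masked register wait into a single KIQ submission, so an SR-IOV guest can perform write-then-poll sequences without direct MMIO access. A hedged caller sketch (req_reg, ack_reg and vmid are placeholders, not names from this patch):

if (amdgpu_sriov_runtime(adev)) {
	/* e.g. a TLB-flush-style user passing its request and
	 * acknowledge registers plus a per-VMID bit as ref/mask */
	amdgpu_virt_kiq_reg_write_reg_wait(adev, req_reg, ack_reg,
					   1 << vmid, 1 << vmid);
	return;
}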
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 880ac113a3a9..0728fbc9a692 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | |||
@@ -238,7 +238,6 @@ typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info ; | |||
238 | struct amdgpu_virt { | 238 | struct amdgpu_virt { |
239 | uint32_t caps; | 239 | uint32_t caps; |
240 | struct amdgpu_bo *csa_obj; | 240 | struct amdgpu_bo *csa_obj; |
241 | uint64_t csa_vmid0_addr; | ||
242 | bool chained_ib_support; | 241 | bool chained_ib_support; |
243 | uint32_t reg_val_offs; | 242 | uint32_t reg_val_offs; |
244 | struct amdgpu_irq_src ack_irq; | 243 | struct amdgpu_irq_src ack_irq; |
@@ -251,8 +250,6 @@ struct amdgpu_virt { | |||
251 | uint32_t gim_feature; | 250 | uint32_t gim_feature; |
252 | }; | 251 | }; |
253 | 252 | ||
254 | #define AMDGPU_CSA_SIZE (8 * 1024) | ||
255 | |||
256 | #define amdgpu_sriov_enabled(adev) \ | 253 | #define amdgpu_sriov_enabled(adev) \ |
257 | ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV) | 254 | ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV) |
258 | 255 | ||
@@ -277,17 +274,13 @@ static inline bool is_virtual_machine(void) | |||
277 | #endif | 274 | #endif |
278 | } | 275 | } |
279 | 276 | ||
280 | struct amdgpu_vm; | ||
281 | |||
282 | uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev); | ||
283 | bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); | 277 | bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); |
284 | int amdgpu_allocate_static_csa(struct amdgpu_device *adev); | ||
285 | int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, | ||
286 | struct amdgpu_bo_va **bo_va); | ||
287 | void amdgpu_free_static_csa(struct amdgpu_device *adev); | ||
288 | void amdgpu_virt_init_setting(struct amdgpu_device *adev); | 278 | void amdgpu_virt_init_setting(struct amdgpu_device *adev); |
289 | uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); | 279 | uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); |
290 | void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); | 280 | void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); |
281 | void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, | ||
282 | uint32_t reg0, uint32_t rreg1, | ||
283 | uint32_t ref, uint32_t mask); | ||
291 | int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); | 284 | int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); |
292 | int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init); | 285 | int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init); |
293 | int amdgpu_virt_reset_gpu(struct amdgpu_device *adev); | 286 | int amdgpu_virt_reset_gpu(struct amdgpu_device *adev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d6c47972062a..58a2363040dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -1632,13 +1632,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, | |||
1632 | continue; | 1632 | continue; |
1633 | } | 1633 | } |
1634 | 1634 | ||
1635 | /* First check if the entry is already handled */ | ||
1636 | if (cursor.pfn < frag_start) { | ||
1637 | cursor.entry->huge = true; | ||
1638 | amdgpu_vm_pt_next(adev, &cursor); | ||
1639 | continue; | ||
1640 | } | ||
1641 | |||
1642 | /* If it isn't already handled it can't be a huge page */ | 1635 | /* If it isn't already handled it can't be a huge page */ |
1643 | if (cursor.entry->huge) { | 1636 | if (cursor.entry->huge) { |
1644 | /* Add the entry to the relocated list to update it. */ | 1637 | /* Add the entry to the relocated list to update it. */ |
@@ -1701,8 +1694,17 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, | |||
1701 | } | 1694 | } |
1702 | } while (frag_start < entry_end); | 1695 | } while (frag_start < entry_end); |
1703 | 1696 | ||
1704 | if (frag >= shift) | 1697 | if (amdgpu_vm_pt_descendant(adev, &cursor)) { |
1698 | /* Mark all child entries as huge */ | ||
1699 | while (cursor.pfn < frag_start) { | ||
1700 | cursor.entry->huge = true; | ||
1701 | amdgpu_vm_pt_next(adev, &cursor); | ||
1702 | } | ||
1703 | |||
1704 | } else if (frag >= shift) { | ||
1705 | /* or just move on to the next on the same level. */ | ||
1705 | amdgpu_vm_pt_next(adev, &cursor); | 1706 | amdgpu_vm_pt_next(adev, &cursor); |
1707 | } | ||
1706 | } | 1708 | } |
1707 | 1709 | ||
1708 | return 0; | 1710 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 897afbb348c1..909216a9b447 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | |||
@@ -63,7 +63,7 @@ static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) | |||
63 | 63 | ||
64 | int amdgpu_xgmi_add_device(struct amdgpu_device *adev) | 64 | int amdgpu_xgmi_add_device(struct amdgpu_device *adev) |
65 | { | 65 | { |
66 | struct psp_xgmi_topology_info tmp_topology[AMDGPU_MAX_XGMI_DEVICE_PER_HIVE]; | 66 | struct psp_xgmi_topology_info *tmp_topology; |
67 | struct amdgpu_hive_info *hive; | 67 | struct amdgpu_hive_info *hive; |
68 | struct amdgpu_xgmi *entry; | 68 | struct amdgpu_xgmi *entry; |
69 | struct amdgpu_device *tmp_adev; | 69 | struct amdgpu_device *tmp_adev; |
@@ -73,10 +73,12 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) | |||
73 | if ((adev->asic_type < CHIP_VEGA20) || | 73 | if ((adev->asic_type < CHIP_VEGA20) || |
74 | (adev->flags & AMD_IS_APU) ) | 74 | (adev->flags & AMD_IS_APU) ) |
75 | return 0; | 75 | return 0; |
76 | adev->gmc.xgmi.device_id = psp_xgmi_get_device_id(&adev->psp); | 76 | adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp); |
77 | adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp); | 77 | adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp); |
78 | 78 | ||
79 | memset(&tmp_topology[0], 0, sizeof(tmp_topology)); | 79 | tmp_topology = kzalloc(sizeof(struct psp_xgmi_topology_info), GFP_KERNEL); |
80 | if (!tmp_topology) | ||
81 | return -ENOMEM; | ||
80 | mutex_lock(&xgmi_mutex); | 82 | mutex_lock(&xgmi_mutex); |
81 | hive = amdgpu_get_xgmi_hive(adev); | 83 | hive = amdgpu_get_xgmi_hive(adev); |
82 | if (!hive) | 84 | if (!hive) |
@@ -84,23 +86,28 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) | |||
84 | 86 | ||
85 | list_add_tail(&adev->gmc.xgmi.head, &hive->device_list); | 87 | list_add_tail(&adev->gmc.xgmi.head, &hive->device_list); |
86 | list_for_each_entry(entry, &hive->device_list, head) | 88 | list_for_each_entry(entry, &hive->device_list, head) |
87 | tmp_topology[count++].device_id = entry->device_id; | 89 | tmp_topology->nodes[count++].node_id = entry->node_id; |
88 | 90 | ||
89 | ret = psp_xgmi_get_topology_info(&adev->psp, count, tmp_topology); | 91 | /* Each psp needs to get the latest topology */
90 | if (ret) { | 92 | list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { |
91 | dev_err(adev->dev, | 93 | ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, tmp_topology); |
92 | "XGMI: Get topology failure on device %llx, hive %llx, ret %d", | 94 | if (ret) { |
93 | adev->gmc.xgmi.device_id, | 95 | dev_err(tmp_adev->dev, |
94 | adev->gmc.xgmi.hive_id, ret); | 96 | "XGMI: Get topology failure on device %llx, hive %llx, ret %d", |
95 | goto exit; | 97 | tmp_adev->gmc.xgmi.node_id, |
98 | tmp_adev->gmc.xgmi.hive_id, ret); | ||
99 | /* TODO: continue if some nodes failed, or disable the whole hive */ ||
100 | break; | ||
101 | } | ||
96 | } | 102 | } |
103 | |||
97 | /* Each psp needs to set the latest topology */ | 104 | /* Each psp needs to set the latest topology */
98 | list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { | 105 | list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { |
99 | ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, tmp_topology); | 106 | ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, tmp_topology); |
100 | if (ret) { | 107 | if (ret) { |
101 | dev_err(tmp_adev->dev, | 108 | dev_err(tmp_adev->dev, |
102 | "XGMI: Set topology failure on device %llx, hive %llx, ret %d", | 109 | "XGMI: Set topology failure on device %llx, hive %llx, ret %d", |
103 | tmp_adev->gmc.xgmi.device_id, | 110 | tmp_adev->gmc.xgmi.node_id, |
104 | tmp_adev->gmc.xgmi.hive_id, ret); | 111 | tmp_adev->gmc.xgmi.hive_id, ret); |
105 | /* TODO: continue if some nodes failed, or disable the whole hive */ | 112 | /* TODO: continue if some nodes failed, or disable the whole hive */
106 | break; | 113 | break; |
@@ -113,7 +120,6 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) | |||
113 | 120 | ||
114 | exit: | 121 | exit: |
115 | mutex_unlock(&xgmi_mutex); | 122 | mutex_unlock(&xgmi_mutex); |
123 | kfree(tmp_topology); | ||
116 | return ret; | 124 | return ret; |
117 | } | 125 | } |
118 | |||
119 | |||
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 79220a91abe3..86e14c754dd4 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c | |||
@@ -743,19 +743,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable) | |||
743 | 743 | ||
744 | if (pi->caps_sq_ramping || pi->caps_db_ramping || | 744 | if (pi->caps_sq_ramping || pi->caps_db_ramping || |
745 | pi->caps_td_ramping || pi->caps_tcp_ramping) { | 745 | pi->caps_td_ramping || pi->caps_tcp_ramping) { |
746 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 746 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
747 | 747 | ||
748 | if (enable) { | 748 | if (enable) { |
749 | ret = ci_program_pt_config_registers(adev, didt_config_ci); | 749 | ret = ci_program_pt_config_registers(adev, didt_config_ci); |
750 | if (ret) { | 750 | if (ret) { |
751 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 751 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
752 | return ret; | 752 | return ret; |
753 | } | 753 | } |
754 | } | 754 | } |
755 | 755 | ||
756 | ci_do_enable_didt(adev, enable); | 756 | ci_do_enable_didt(adev, enable); |
757 | 757 | ||
758 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 758 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
759 | } | 759 | } |
760 | 760 | ||
761 | return 0; | 761 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index b918c8886b75..45795191de1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c | |||
@@ -198,7 +198,7 @@ static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring) | |||
198 | 198 | ||
199 | static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | 199 | static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) |
200 | { | 200 | { |
201 | struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); | 201 | struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); |
202 | int i; | 202 | int i; |
203 | 203 | ||
204 | for (i = 0; i < count; i++) | 204 | for (i = 0; i < count; i++) |
@@ -218,9 +218,11 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | |||
218 | * Schedule an IB in the DMA ring (CIK). | 218 | * Schedule an IB in the DMA ring (CIK). |
219 | */ | 219 | */ |
220 | static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, | 220 | static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, |
221 | struct amdgpu_job *job, | ||
221 | struct amdgpu_ib *ib, | 222 | struct amdgpu_ib *ib, |
222 | unsigned vmid, bool ctx_switch) | 223 | bool ctx_switch) |
223 | { | 224 | { |
225 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
224 | u32 extra_bits = vmid & 0xf; | 226 | u32 extra_bits = vmid & 0xf; |
225 | 227 | ||
226 | /* IB packet must end on a 8 DW boundary */ | 228 | /* IB packet must end on a 8 DW boundary */ |
@@ -316,8 +318,8 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev) | |||
316 | WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); | 318 | WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); |
317 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0); | 319 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0); |
318 | } | 320 | } |
319 | sdma0->ready = false; | 321 | sdma0->sched.ready = false; |
320 | sdma1->ready = false; | 322 | sdma1->sched.ready = false; |
321 | } | 323 | } |
322 | 324 | ||
323 | /** | 325 | /** |
@@ -494,18 +496,16 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) | |||
494 | /* enable DMA IBs */ | 496 | /* enable DMA IBs */ |
495 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | 497 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); |
496 | 498 | ||
497 | ring->ready = true; | 499 | ring->sched.ready = true; |
498 | } | 500 | } |
499 | 501 | ||
500 | cik_sdma_enable(adev, true); | 502 | cik_sdma_enable(adev, true); |
501 | 503 | ||
502 | for (i = 0; i < adev->sdma.num_instances; i++) { | 504 | for (i = 0; i < adev->sdma.num_instances; i++) { |
503 | ring = &adev->sdma.instance[i].ring; | 505 | ring = &adev->sdma.instance[i].ring; |
504 | r = amdgpu_ring_test_ring(ring); | 506 | r = amdgpu_ring_test_helper(ring); |
505 | if (r) { | 507 | if (r) |
506 | ring->ready = false; | ||
507 | return r; | 508 | return r; |
508 | } | ||
509 | 509 | ||
510 | if (adev->mman.buffer_funcs_ring == ring) | 510 | if (adev->mman.buffer_funcs_ring == ring) |
511 | amdgpu_ttm_set_buffer_funcs_status(adev, true); | 511 | amdgpu_ttm_set_buffer_funcs_status(adev, true); |
@@ -618,21 +618,17 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring) | |||
618 | u64 gpu_addr; | 618 | u64 gpu_addr; |
619 | 619 | ||
620 | r = amdgpu_device_wb_get(adev, &index); | 620 | r = amdgpu_device_wb_get(adev, &index); |
621 | if (r) { | 621 | if (r) |
622 | dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); | ||
623 | return r; | 622 | return r; |
624 | } | ||
625 | 623 | ||
626 | gpu_addr = adev->wb.gpu_addr + (index * 4); | 624 | gpu_addr = adev->wb.gpu_addr + (index * 4); |
627 | tmp = 0xCAFEDEAD; | 625 | tmp = 0xCAFEDEAD; |
628 | adev->wb.wb[index] = cpu_to_le32(tmp); | 626 | adev->wb.wb[index] = cpu_to_le32(tmp); |
629 | 627 | ||
630 | r = amdgpu_ring_alloc(ring, 5); | 628 | r = amdgpu_ring_alloc(ring, 5); |
631 | if (r) { | 629 | if (r) |
632 | DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); | 630 | goto error_free_wb; |
633 | amdgpu_device_wb_free(adev, index); | 631 | |
634 | return r; | ||
635 | } | ||
636 | amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); | 632 | amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); |
637 | amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); | 633 | amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); |
638 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); | 634 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); |
@@ -647,15 +643,11 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring) | |||
647 | DRM_UDELAY(1); | 643 | DRM_UDELAY(1); |
648 | } | 644 | } |
649 | 645 | ||
650 | if (i < adev->usec_timeout) { | 646 | if (i >= adev->usec_timeout) |
651 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); | 647 | r = -ETIMEDOUT; |
652 | } else { | ||
653 | DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", | ||
654 | ring->idx, tmp); | ||
655 | r = -EINVAL; | ||
656 | } | ||
657 | amdgpu_device_wb_free(adev, index); | ||
658 | 648 | ||
649 | error_free_wb: | ||
650 | amdgpu_device_wb_free(adev, index); | ||
659 | return r; | 651 | return r; |
660 | } | 652 | } |
661 | 653 | ||
@@ -678,20 +670,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
678 | long r; | 670 | long r; |
679 | 671 | ||
680 | r = amdgpu_device_wb_get(adev, &index); | 672 | r = amdgpu_device_wb_get(adev, &index); |
681 | if (r) { | 673 | if (r) |
682 | dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); | ||
683 | return r; | 674 | return r; |
684 | } | ||
685 | 675 | ||
686 | gpu_addr = adev->wb.gpu_addr + (index * 4); | 676 | gpu_addr = adev->wb.gpu_addr + (index * 4); |
687 | tmp = 0xCAFEDEAD; | 677 | tmp = 0xCAFEDEAD; |
688 | adev->wb.wb[index] = cpu_to_le32(tmp); | 678 | adev->wb.wb[index] = cpu_to_le32(tmp); |
689 | memset(&ib, 0, sizeof(ib)); | 679 | memset(&ib, 0, sizeof(ib)); |
690 | r = amdgpu_ib_get(adev, NULL, 256, &ib); | 680 | r = amdgpu_ib_get(adev, NULL, 256, &ib); |
691 | if (r) { | 681 | if (r) |
692 | DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); | ||
693 | goto err0; | 682 | goto err0; |
694 | } | ||
695 | 683 | ||
696 | ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, | 684 | ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, |
697 | SDMA_WRITE_SUB_OPCODE_LINEAR, 0); | 685 | SDMA_WRITE_SUB_OPCODE_LINEAR, 0); |
@@ -706,21 +694,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
706 | 694 | ||
707 | r = dma_fence_wait_timeout(f, false, timeout); | 695 | r = dma_fence_wait_timeout(f, false, timeout); |
708 | if (r == 0) { | 696 | if (r == 0) { |
709 | DRM_ERROR("amdgpu: IB test timed out\n"); | ||
710 | r = -ETIMEDOUT; | 697 | r = -ETIMEDOUT; |
711 | goto err1; | 698 | goto err1; |
712 | } else if (r < 0) { | 699 | } else if (r < 0) { |
713 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); | ||
714 | goto err1; | 700 | goto err1; |
715 | } | 701 | } |
716 | tmp = le32_to_cpu(adev->wb.wb[index]); | 702 | tmp = le32_to_cpu(adev->wb.wb[index]); |
717 | if (tmp == 0xDEADBEEF) { | 703 | if (tmp == 0xDEADBEEF) |
718 | DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); | ||
719 | r = 0; | 704 | r = 0; |
720 | } else { | 705 | else |
721 | DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); | ||
722 | r = -EINVAL; | 706 | r = -EINVAL; |
723 | } | ||
724 | 707 | ||
725 | err1: | 708 | err1: |
726 | amdgpu_ib_free(adev, &ib, NULL); | 709 | amdgpu_ib_free(adev, &ib, NULL); |
@@ -822,7 +805,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, | |||
822 | */ | 805 | */ |
823 | static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) | 806 | static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) |
824 | { | 807 | { |
825 | struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); | 808 | struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); |
826 | u32 pad_count; | 809 | u32 pad_count; |
827 | int i; | 810 | int i; |
828 | 811 | ||
@@ -1214,8 +1197,11 @@ static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev, | |||
1214 | struct amdgpu_irq_src *source, | 1197 | struct amdgpu_irq_src *source, |
1215 | struct amdgpu_iv_entry *entry) | 1198 | struct amdgpu_iv_entry *entry) |
1216 | { | 1199 | { |
1200 | u8 instance_id; | ||
1201 | |||
1217 | DRM_ERROR("Illegal instruction in SDMA command stream\n"); | 1202 | DRM_ERROR("Illegal instruction in SDMA command stream\n"); |
1218 | schedule_work(&adev->reset_work); | 1203 | instance_id = (entry->ring_id & 0x3) >> 0; |
1204 | drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched); | ||
1219 | return 0; | 1205 | return 0; |
1220 | } | 1206 | } |
1221 | 1207 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index d76eb27945dc..1dc3013ea1d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | |||
@@ -1775,18 +1775,15 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring) | |||
1775 | int r; | 1775 | int r; |
1776 | 1776 | ||
1777 | r = amdgpu_gfx_scratch_get(adev, &scratch); | 1777 | r = amdgpu_gfx_scratch_get(adev, &scratch); |
1778 | if (r) { | 1778 | if (r) |
1779 | DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r); | ||
1780 | return r; | 1779 | return r; |
1781 | } | 1780 | |
1782 | WREG32(scratch, 0xCAFEDEAD); | 1781 | WREG32(scratch, 0xCAFEDEAD); |
1783 | 1782 | ||
1784 | r = amdgpu_ring_alloc(ring, 3); | 1783 | r = amdgpu_ring_alloc(ring, 3); |
1785 | if (r) { | 1784 | if (r) |
1786 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); | 1785 | goto error_free_scratch; |
1787 | amdgpu_gfx_scratch_free(adev, scratch); | 1786 | |
1788 | return r; | ||
1789 | } | ||
1790 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | 1787 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
1791 | amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START)); | 1788 | amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START)); |
1792 | amdgpu_ring_write(ring, 0xDEADBEEF); | 1789 | amdgpu_ring_write(ring, 0xDEADBEEF); |
@@ -1798,13 +1795,11 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring) | |||
1798 | break; | 1795 | break; |
1799 | DRM_UDELAY(1); | 1796 | DRM_UDELAY(1); |
1800 | } | 1797 | } |
1801 | if (i < adev->usec_timeout) { | 1798 | |
1802 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); | 1799 | if (i >= adev->usec_timeout) |
1803 | } else { | 1800 | r = -ETIMEDOUT; |
1804 | DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n", | 1801 | |
1805 | ring->idx, scratch, tmp); | 1802 | error_free_scratch: |
1806 | r = -EINVAL; | ||
1807 | } | ||
1808 | amdgpu_gfx_scratch_free(adev, scratch); | 1803 | amdgpu_gfx_scratch_free(adev, scratch); |
1809 | return r; | 1804 | return r; |
1810 | } | 1805 | } |
@@ -1845,9 +1840,11 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, | |||
1845 | } | 1840 | } |
1846 | 1841 | ||
1847 | static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring, | 1842 | static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring, |
1843 | struct amdgpu_job *job, | ||
1848 | struct amdgpu_ib *ib, | 1844 | struct amdgpu_ib *ib, |
1849 | unsigned vmid, bool ctx_switch) | 1845 | bool ctx_switch) |
1850 | { | 1846 | { |
1847 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
1851 | u32 header, control = 0; | 1848 | u32 header, control = 0; |
1852 | 1849 | ||
1853 | /* insert SWITCH_BUFFER packet before first IB in the ring frame */ | 1850 | /* insert SWITCH_BUFFER packet before first IB in the ring frame */ |
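The emit_ib callbacks across this series gain a struct amdgpu_job pointer and derive the VMID with AMDGPU_JOB_GET_VMID(job) instead of taking a raw vmid argument. The macro itself is not shown here; it must tolerate a NULL job, since kernel-internal submissions such as the ring and IB tests emit IBs without one. A plausible definition, assuming the obvious shape in amdgpu_job.h:

    /* amdgpu_job.h (assumed): kernel submissions may pass job == NULL */
    #define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)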
@@ -1892,17 +1889,15 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
1892 | long r; | 1889 | long r; |
1893 | 1890 | ||
1894 | r = amdgpu_gfx_scratch_get(adev, &scratch); | 1891 | r = amdgpu_gfx_scratch_get(adev, &scratch); |
1895 | if (r) { | 1892 | if (r) |
1896 | DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r); | ||
1897 | return r; | 1893 | return r; |
1898 | } | 1894 | |
1899 | WREG32(scratch, 0xCAFEDEAD); | 1895 | WREG32(scratch, 0xCAFEDEAD); |
1900 | memset(&ib, 0, sizeof(ib)); | 1896 | memset(&ib, 0, sizeof(ib)); |
1901 | r = amdgpu_ib_get(adev, NULL, 256, &ib); | 1897 | r = amdgpu_ib_get(adev, NULL, 256, &ib); |
1902 | if (r) { | 1898 | if (r) |
1903 | DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); | ||
1904 | goto err1; | 1899 | goto err1; |
1905 | } | 1900 | |
1906 | ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1); | 1901 | ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1); |
1907 | ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_START)); | 1902 | ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_START)); |
1908 | ib.ptr[2] = 0xDEADBEEF; | 1903 | ib.ptr[2] = 0xDEADBEEF; |
@@ -1914,22 +1909,16 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
1914 | 1909 | ||
1915 | r = dma_fence_wait_timeout(f, false, timeout); | 1910 | r = dma_fence_wait_timeout(f, false, timeout); |
1916 | if (r == 0) { | 1911 | if (r == 0) { |
1917 | DRM_ERROR("amdgpu: IB test timed out\n"); | ||
1918 | r = -ETIMEDOUT; | 1912 | r = -ETIMEDOUT; |
1919 | goto err2; | 1913 | goto err2; |
1920 | } else if (r < 0) { | 1914 | } else if (r < 0) { |
1921 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); | ||
1922 | goto err2; | 1915 | goto err2; |
1923 | } | 1916 | } |
1924 | tmp = RREG32(scratch); | 1917 | tmp = RREG32(scratch); |
1925 | if (tmp == 0xDEADBEEF) { | 1918 | if (tmp == 0xDEADBEEF) |
1926 | DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); | ||
1927 | r = 0; | 1919 | r = 0; |
1928 | } else { | 1920 | else |
1929 | DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", | ||
1930 | scratch, tmp); | ||
1931 | r = -EINVAL; | 1921 | r = -EINVAL; |
1932 | } | ||
1933 | 1922 | ||
1934 | err2: | 1923 | err2: |
1935 | amdgpu_ib_free(adev, &ib, NULL); | 1924 | amdgpu_ib_free(adev, &ib, NULL); |
@@ -1950,9 +1939,9 @@ static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) | |||
1950 | CP_ME_CNTL__CE_HALT_MASK)); | 1939 | CP_ME_CNTL__CE_HALT_MASK)); |
1951 | WREG32(mmSCRATCH_UMSK, 0); | 1940 | WREG32(mmSCRATCH_UMSK, 0); |
1952 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) | 1941 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) |
1953 | adev->gfx.gfx_ring[i].ready = false; | 1942 | adev->gfx.gfx_ring[i].sched.ready = false; |
1954 | for (i = 0; i < adev->gfx.num_compute_rings; i++) | 1943 | for (i = 0; i < adev->gfx.num_compute_rings; i++) |
1955 | adev->gfx.compute_ring[i].ready = false; | 1944 | adev->gfx.compute_ring[i].sched.ready = false; |
1956 | } | 1945 | } |
1957 | udelay(50); | 1946 | udelay(50); |
1958 | } | 1947 | } |
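This hunk is one instance of a series-wide substitution: the driver-private ring->ready flag is replaced by the GPU scheduler's own ring->sched.ready, so a single flag gates both scheduler jobs and direct kernel submissions. A sketch of the kind of guard a submission path then needs (the exact call site is not part of this diff):

    /* amdgpu_ib.c (sketch): submission now keys off the scheduler flag */
    if (!ring->sched.ready) {
            dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n",
                    ring->name);
            return -EINVAL;
    }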
@@ -2124,12 +2113,9 @@ static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev) | |||
2124 | 2113 | ||
2125 | /* start the rings */ | 2114 | /* start the rings */ |
2126 | gfx_v6_0_cp_gfx_start(adev); | 2115 | gfx_v6_0_cp_gfx_start(adev); |
2127 | ring->ready = true; | 2116 | r = amdgpu_ring_test_helper(ring); |
2128 | r = amdgpu_ring_test_ring(ring); | 2117 | if (r) |
2129 | if (r) { | ||
2130 | ring->ready = false; | ||
2131 | return r; | 2118 | return r; |
2132 | } | ||
2133 | 2119 | ||
2134 | return 0; | 2120 | return 0; |
2135 | } | 2121 | } |
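amdgpu_ring_test_helper() replaces the open-coded pattern of setting the ready flag, calling amdgpu_ring_test_ring() and clearing the flag again on failure. Its body is not shown in this diff; folding the removed call sites together suggests roughly the following, with sched.ready derived from the test result:

    /* amdgpu_ring.c (sketch reconstructed from the removed call sites) */
    int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
    {
            struct amdgpu_device *adev = ring->adev;
            int r;

            r = amdgpu_ring_test_ring(ring);
            if (r)
                    DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
                                  ring->name, r);

            /* only a ring that passed its test is usable by the scheduler */
            ring->sched.ready = !r;
            return r;
    }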
@@ -2227,14 +2213,11 @@ static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev) | |||
2227 | WREG32(mmCP_RB2_CNTL, tmp); | 2213 | WREG32(mmCP_RB2_CNTL, tmp); |
2228 | WREG32(mmCP_RB2_BASE, ring->gpu_addr >> 8); | 2214 | WREG32(mmCP_RB2_BASE, ring->gpu_addr >> 8); |
2229 | 2215 | ||
2230 | adev->gfx.compute_ring[0].ready = false; | ||
2231 | adev->gfx.compute_ring[1].ready = false; | ||
2232 | 2216 | ||
2233 | for (i = 0; i < 2; i++) { | 2217 | for (i = 0; i < 2; i++) { |
2234 | r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[i]); | 2218 | r = amdgpu_ring_test_helper(&adev->gfx.compute_ring[i]); |
2235 | if (r) | 2219 | if (r) |
2236 | return r; | 2220 | return r; |
2237 | adev->gfx.compute_ring[i].ready = true; | ||
2238 | } | 2221 | } |
2239 | 2222 | ||
2240 | return 0; | 2223 | return 0; |
@@ -2368,18 +2351,11 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring, | |||
2368 | amdgpu_ring_write(ring, val); | 2351 | amdgpu_ring_write(ring, val); |
2369 | } | 2352 | } |
2370 | 2353 | ||
2371 | static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev) | ||
2372 | { | ||
2373 | amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL); | ||
2374 | amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL); | ||
2375 | amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL); | ||
2376 | } | ||
2377 | |||
2378 | static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) | 2354 | static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) |
2379 | { | 2355 | { |
2380 | const u32 *src_ptr; | 2356 | const u32 *src_ptr; |
2381 | volatile u32 *dst_ptr; | 2357 | volatile u32 *dst_ptr; |
2382 | u32 dws, i; | 2358 | u32 dws; |
2383 | u64 reg_list_mc_addr; | 2359 | u64 reg_list_mc_addr; |
2384 | const struct cs_section_def *cs_data; | 2360 | const struct cs_section_def *cs_data; |
2385 | int r; | 2361 | int r; |
@@ -2394,26 +2370,10 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) | |||
2394 | cs_data = adev->gfx.rlc.cs_data; | 2370 | cs_data = adev->gfx.rlc.cs_data; |
2395 | 2371 | ||
2396 | if (src_ptr) { | 2372 | if (src_ptr) { |
2397 | /* save restore block */ | 2373 | /* init save restore block */ |
2398 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, | 2374 | r = amdgpu_gfx_rlc_init_sr(adev, dws); |
2399 | AMDGPU_GEM_DOMAIN_VRAM, | 2375 | if (r) |
2400 | &adev->gfx.rlc.save_restore_obj, | ||
2401 | &adev->gfx.rlc.save_restore_gpu_addr, | ||
2402 | (void **)&adev->gfx.rlc.sr_ptr); | ||
2403 | if (r) { | ||
2404 | dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", | ||
2405 | r); | ||
2406 | gfx_v6_0_rlc_fini(adev); | ||
2407 | return r; | 2376 | return r; |
2408 | } | ||
2409 | |||
2410 | /* write the sr buffer */ | ||
2411 | dst_ptr = adev->gfx.rlc.sr_ptr; | ||
2412 | for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) | ||
2413 | dst_ptr[i] = cpu_to_le32(src_ptr[i]); | ||
2414 | |||
2415 | amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj); | ||
2416 | amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); | ||
2417 | } | 2377 | } |
2418 | 2378 | ||
2419 | if (cs_data) { | 2379 | if (cs_data) { |
@@ -2428,7 +2388,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) | |||
2428 | (void **)&adev->gfx.rlc.cs_ptr); | 2388 | (void **)&adev->gfx.rlc.cs_ptr); |
2429 | if (r) { | 2389 | if (r) { |
2430 | dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); | 2390 | dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); |
2431 | gfx_v6_0_rlc_fini(adev); | 2391 | amdgpu_gfx_rlc_fini(adev); |
2432 | return r; | 2392 | return r; |
2433 | } | 2393 | } |
2434 | 2394 | ||
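The save/restore block setup deleted above (BO allocation plus the cpu_to_le32 copy of the register list) is exactly what the shared amdgpu_gfx_rlc_init_sr() now does for every ASIC. Reassembling the removed lines into the helper gives roughly the following; the src_ptr source is assumed to be adev->gfx.rlc.reg_list, matching the if (src_ptr) guard in the caller:

    /* amdgpu_rlc.c (sketch assembled from the code removed above) */
    int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
    {
            const u32 *src_ptr = adev->gfx.rlc.reg_list;  /* assumed */
            volatile u32 *dst_ptr;
            u32 i;
            int r;

            /* allocate the save restore block */
            r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
                                          AMDGPU_GEM_DOMAIN_VRAM,
                                          &adev->gfx.rlc.save_restore_obj,
                                          &adev->gfx.rlc.save_restore_gpu_addr,
                                          (void **)&adev->gfx.rlc.sr_ptr);
            if (r) {
                    dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
                    amdgpu_gfx_rlc_fini(adev);
                    return r;
            }

            /* write the sr buffer */
            dst_ptr = adev->gfx.rlc.sr_ptr;
            for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
                    dst_ptr[i] = cpu_to_le32(src_ptr[i]);
            amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
            amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);

            return 0;
    }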
@@ -2549,8 +2509,8 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev) | |||
2549 | if (!adev->gfx.rlc_fw) | 2509 | if (!adev->gfx.rlc_fw) |
2550 | return -EINVAL; | 2510 | return -EINVAL; |
2551 | 2511 | ||
2552 | gfx_v6_0_rlc_stop(adev); | 2512 | adev->gfx.rlc.funcs->stop(adev); |
2553 | gfx_v6_0_rlc_reset(adev); | 2513 | adev->gfx.rlc.funcs->reset(adev); |
2554 | gfx_v6_0_init_pg(adev); | 2514 | gfx_v6_0_init_pg(adev); |
2555 | gfx_v6_0_init_cg(adev); | 2515 | gfx_v6_0_init_cg(adev); |
2556 | 2516 | ||
@@ -2578,7 +2538,7 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev) | |||
2578 | WREG32(mmRLC_UCODE_ADDR, 0); | 2538 | WREG32(mmRLC_UCODE_ADDR, 0); |
2579 | 2539 | ||
2580 | gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev)); | 2540 | gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev)); |
2581 | gfx_v6_0_rlc_start(adev); | 2541 | adev->gfx.rlc.funcs->start(adev); |
2582 | 2542 | ||
2583 | return 0; | 2543 | return 0; |
2584 | } | 2544 | } |
@@ -3075,6 +3035,14 @@ static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = { | |||
3075 | .select_me_pipe_q = &gfx_v6_0_select_me_pipe_q | 3035 | .select_me_pipe_q = &gfx_v6_0_select_me_pipe_q |
3076 | }; | 3036 | }; |
3077 | 3037 | ||
3038 | static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = { | ||
3039 | .init = gfx_v6_0_rlc_init, | ||
3040 | .resume = gfx_v6_0_rlc_resume, | ||
3041 | .stop = gfx_v6_0_rlc_stop, | ||
3042 | .reset = gfx_v6_0_rlc_reset, | ||
3043 | .start = gfx_v6_0_rlc_start | ||
3044 | }; | ||
3045 | |||
3078 | static int gfx_v6_0_early_init(void *handle) | 3046 | static int gfx_v6_0_early_init(void *handle) |
3079 | { | 3047 | { |
3080 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 3048 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
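Registering gfx_v6_0_rlc_funcs in early_init lets common code drive the RLC through adev->gfx.rlc.funcs without knowing the ASIC generation; the later hunks in this file (funcs->init, funcs->resume, funcs->stop, funcs->reset, funcs->start) all dispatch through it. The vtable shape implied by the initializer above is approximately:

    /* amdgpu_rlc.h (sketch of the hooks gfx_v6_0 fills in above) */
    struct amdgpu_rlc_funcs {
            int  (*init)(struct amdgpu_device *adev);
            int  (*resume)(struct amdgpu_device *adev);
            void (*stop)(struct amdgpu_device *adev);
            void (*reset)(struct amdgpu_device *adev);
            void (*start)(struct amdgpu_device *adev);
            /* gfx v7+ also provide safe-mode, CSB and CP-table hooks,
             * as the gfx_v7_0 diff below shows */
    };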
@@ -3082,6 +3050,7 @@ static int gfx_v6_0_early_init(void *handle) | |||
3082 | adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS; | 3050 | adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS; |
3083 | adev->gfx.num_compute_rings = GFX6_NUM_COMPUTE_RINGS; | 3051 | adev->gfx.num_compute_rings = GFX6_NUM_COMPUTE_RINGS; |
3084 | adev->gfx.funcs = &gfx_v6_0_gfx_funcs; | 3052 | adev->gfx.funcs = &gfx_v6_0_gfx_funcs; |
3053 | adev->gfx.rlc.funcs = &gfx_v6_0_rlc_funcs; | ||
3085 | gfx_v6_0_set_ring_funcs(adev); | 3054 | gfx_v6_0_set_ring_funcs(adev); |
3086 | gfx_v6_0_set_irq_funcs(adev); | 3055 | gfx_v6_0_set_irq_funcs(adev); |
3087 | 3056 | ||
@@ -3114,7 +3083,7 @@ static int gfx_v6_0_sw_init(void *handle) | |||
3114 | return r; | 3083 | return r; |
3115 | } | 3084 | } |
3116 | 3085 | ||
3117 | r = gfx_v6_0_rlc_init(adev); | 3086 | r = adev->gfx.rlc.funcs->init(adev); |
3118 | if (r) { | 3087 | if (r) { |
3119 | DRM_ERROR("Failed to init rlc BOs!\n"); | 3088 | DRM_ERROR("Failed to init rlc BOs!\n"); |
3120 | return r; | 3089 | return r; |
@@ -3165,7 +3134,7 @@ static int gfx_v6_0_sw_fini(void *handle) | |||
3165 | for (i = 0; i < adev->gfx.num_compute_rings; i++) | 3134 | for (i = 0; i < adev->gfx.num_compute_rings; i++) |
3166 | amdgpu_ring_fini(&adev->gfx.compute_ring[i]); | 3135 | amdgpu_ring_fini(&adev->gfx.compute_ring[i]); |
3167 | 3136 | ||
3168 | gfx_v6_0_rlc_fini(adev); | 3137 | amdgpu_gfx_rlc_fini(adev); |
3169 | 3138 | ||
3170 | return 0; | 3139 | return 0; |
3171 | } | 3140 | } |
@@ -3177,7 +3146,7 @@ static int gfx_v6_0_hw_init(void *handle) | |||
3177 | 3146 | ||
3178 | gfx_v6_0_constants_init(adev); | 3147 | gfx_v6_0_constants_init(adev); |
3179 | 3148 | ||
3180 | r = gfx_v6_0_rlc_resume(adev); | 3149 | r = adev->gfx.rlc.funcs->resume(adev); |
3181 | if (r) | 3150 | if (r) |
3182 | return r; | 3151 | return r; |
3183 | 3152 | ||
@@ -3195,7 +3164,7 @@ static int gfx_v6_0_hw_fini(void *handle) | |||
3195 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 3164 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
3196 | 3165 | ||
3197 | gfx_v6_0_cp_enable(adev, false); | 3166 | gfx_v6_0_cp_enable(adev, false); |
3198 | gfx_v6_0_rlc_stop(adev); | 3167 | adev->gfx.rlc.funcs->stop(adev); |
3199 | gfx_v6_0_fini_pg(adev); | 3168 | gfx_v6_0_fini_pg(adev); |
3200 | 3169 | ||
3201 | return 0; | 3170 | return 0; |
@@ -3393,12 +3362,31 @@ static int gfx_v6_0_eop_irq(struct amdgpu_device *adev, | |||
3393 | return 0; | 3362 | return 0; |
3394 | } | 3363 | } |
3395 | 3364 | ||
3365 | static void gfx_v6_0_fault(struct amdgpu_device *adev, | ||
3366 | struct amdgpu_iv_entry *entry) | ||
3367 | { | ||
3368 | struct amdgpu_ring *ring; | ||
3369 | |||
3370 | switch (entry->ring_id) { | ||
3371 | case 0: | ||
3372 | ring = &adev->gfx.gfx_ring[0]; | ||
3373 | break; | ||
3374 | case 1: | ||
3375 | case 2: | ||
3376 | ring = &adev->gfx.compute_ring[entry->ring_id - 1]; | ||
3377 | break; | ||
3378 | default: | ||
3379 | return; | ||
3380 | } | ||
3381 | drm_sched_fault(&ring->sched); | ||
3382 | } | ||
3383 | |||
3396 | static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev, | 3384 | static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev, |
3397 | struct amdgpu_irq_src *source, | 3385 | struct amdgpu_irq_src *source, |
3398 | struct amdgpu_iv_entry *entry) | 3386 | struct amdgpu_iv_entry *entry) |
3399 | { | 3387 | { |
3400 | DRM_ERROR("Illegal register access in command stream\n"); | 3388 | DRM_ERROR("Illegal register access in command stream\n"); |
3401 | schedule_work(&adev->reset_work); | 3389 | gfx_v6_0_fault(adev, entry); |
3402 | return 0; | 3390 | return 0; |
3403 | } | 3391 | } |
3404 | 3392 | ||
@@ -3407,7 +3395,7 @@ static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev, | |||
3407 | struct amdgpu_iv_entry *entry) | 3395 | struct amdgpu_iv_entry *entry) |
3408 | { | 3396 | { |
3409 | DRM_ERROR("Illegal instruction in command stream\n"); | 3397 | DRM_ERROR("Illegal instruction in command stream\n"); |
3410 | schedule_work(&adev->reset_work); | 3398 | gfx_v6_0_fault(adev, entry); |
3411 | return 0; | 3399 | return 0; |
3412 | } | 3400 | } |
3413 | 3401 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 0e72bc09939a..f467b9bd090d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | |||
@@ -882,7 +882,6 @@ static const u32 kalindi_rlc_save_restore_register_list[] = | |||
882 | 882 | ||
883 | static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev); | 883 | static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev); |
884 | static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer); | 884 | static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer); |
885 | static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev); | ||
886 | static void gfx_v7_0_init_pg(struct amdgpu_device *adev); | 885 | static void gfx_v7_0_init_pg(struct amdgpu_device *adev); |
887 | static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev); | 886 | static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev); |
888 | 887 | ||
@@ -2064,17 +2063,14 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring) | |||
2064 | int r; | 2063 | int r; |
2065 | 2064 | ||
2066 | r = amdgpu_gfx_scratch_get(adev, &scratch); | 2065 | r = amdgpu_gfx_scratch_get(adev, &scratch); |
2067 | if (r) { | 2066 | if (r) |
2068 | DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r); | ||
2069 | return r; | 2067 | return r; |
2070 | } | 2068 | |
2071 | WREG32(scratch, 0xCAFEDEAD); | 2069 | WREG32(scratch, 0xCAFEDEAD); |
2072 | r = amdgpu_ring_alloc(ring, 3); | 2070 | r = amdgpu_ring_alloc(ring, 3); |
2073 | if (r) { | 2071 | if (r) |
2074 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r); | 2072 | goto error_free_scratch; |
2075 | amdgpu_gfx_scratch_free(adev, scratch); | 2073 | |
2076 | return r; | ||
2077 | } | ||
2078 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); | 2074 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); |
2079 | amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); | 2075 | amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); |
2080 | amdgpu_ring_write(ring, 0xDEADBEEF); | 2076 | amdgpu_ring_write(ring, 0xDEADBEEF); |
@@ -2086,13 +2082,10 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring) | |||
2086 | break; | 2082 | break; |
2087 | DRM_UDELAY(1); | 2083 | DRM_UDELAY(1); |
2088 | } | 2084 | } |
2089 | if (i < adev->usec_timeout) { | 2085 | if (i >= adev->usec_timeout) |
2090 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); | 2086 | r = -ETIMEDOUT; |
2091 | } else { | 2087 | |
2092 | DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n", | 2088 | error_free_scratch: |
2093 | ring->idx, scratch, tmp); | ||
2094 | r = -EINVAL; | ||
2095 | } | ||
2096 | amdgpu_gfx_scratch_free(adev, scratch); | 2089 | amdgpu_gfx_scratch_free(adev, scratch); |
2097 | return r; | 2090 | return r; |
2098 | } | 2091 | } |
@@ -2233,9 +2226,11 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring, | |||
2233 | * on the gfx ring for execution by the GPU. | 2226 | * on the gfx ring for execution by the GPU. |
2234 | */ | 2227 | */ |
2235 | static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, | 2228 | static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, |
2236 | struct amdgpu_ib *ib, | 2229 | struct amdgpu_job *job, |
2237 | unsigned vmid, bool ctx_switch) | 2230 | struct amdgpu_ib *ib, |
2231 | bool ctx_switch) | ||
2238 | { | 2232 | { |
2233 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
2239 | u32 header, control = 0; | 2234 | u32 header, control = 0; |
2240 | 2235 | ||
2241 | /* insert SWITCH_BUFFER packet before first IB in the ring frame */ | 2236 | /* insert SWITCH_BUFFER packet before first IB in the ring frame */ |
@@ -2262,9 +2257,11 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, | |||
2262 | } | 2257 | } |
2263 | 2258 | ||
2264 | static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, | 2259 | static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, |
2260 | struct amdgpu_job *job, | ||
2265 | struct amdgpu_ib *ib, | 2261 | struct amdgpu_ib *ib, |
2266 | unsigned vmid, bool ctx_switch) | 2262 | bool ctx_switch) |
2267 | { | 2263 | { |
2264 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
2268 | u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); | 2265 | u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); |
2269 | 2266 | ||
2270 | amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | 2267 | amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); |
@@ -2316,17 +2313,15 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
2316 | long r; | 2313 | long r; |
2317 | 2314 | ||
2318 | r = amdgpu_gfx_scratch_get(adev, &scratch); | 2315 | r = amdgpu_gfx_scratch_get(adev, &scratch); |
2319 | if (r) { | 2316 | if (r) |
2320 | DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r); | ||
2321 | return r; | 2317 | return r; |
2322 | } | 2318 | |
2323 | WREG32(scratch, 0xCAFEDEAD); | 2319 | WREG32(scratch, 0xCAFEDEAD); |
2324 | memset(&ib, 0, sizeof(ib)); | 2320 | memset(&ib, 0, sizeof(ib)); |
2325 | r = amdgpu_ib_get(adev, NULL, 256, &ib); | 2321 | r = amdgpu_ib_get(adev, NULL, 256, &ib); |
2326 | if (r) { | 2322 | if (r) |
2327 | DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); | ||
2328 | goto err1; | 2323 | goto err1; |
2329 | } | 2324 | |
2330 | ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); | 2325 | ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); |
2331 | ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START)); | 2326 | ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START)); |
2332 | ib.ptr[2] = 0xDEADBEEF; | 2327 | ib.ptr[2] = 0xDEADBEEF; |
@@ -2338,22 +2333,16 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
2338 | 2333 | ||
2339 | r = dma_fence_wait_timeout(f, false, timeout); | 2334 | r = dma_fence_wait_timeout(f, false, timeout); |
2340 | if (r == 0) { | 2335 | if (r == 0) { |
2341 | DRM_ERROR("amdgpu: IB test timed out\n"); | ||
2342 | r = -ETIMEDOUT; | 2336 | r = -ETIMEDOUT; |
2343 | goto err2; | 2337 | goto err2; |
2344 | } else if (r < 0) { | 2338 | } else if (r < 0) { |
2345 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); | ||
2346 | goto err2; | 2339 | goto err2; |
2347 | } | 2340 | } |
2348 | tmp = RREG32(scratch); | 2341 | tmp = RREG32(scratch); |
2349 | if (tmp == 0xDEADBEEF) { | 2342 | if (tmp == 0xDEADBEEF) |
2350 | DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); | ||
2351 | r = 0; | 2343 | r = 0; |
2352 | } else { | 2344 | else |
2353 | DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", | ||
2354 | scratch, tmp); | ||
2355 | r = -EINVAL; | 2345 | r = -EINVAL; |
2356 | } | ||
2357 | 2346 | ||
2358 | err2: | 2347 | err2: |
2359 | amdgpu_ib_free(adev, &ib, NULL); | 2348 | amdgpu_ib_free(adev, &ib, NULL); |
@@ -2403,7 +2392,7 @@ static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) | |||
2403 | } else { | 2392 | } else { |
2404 | WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK)); | 2393 | WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK)); |
2405 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) | 2394 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) |
2406 | adev->gfx.gfx_ring[i].ready = false; | 2395 | adev->gfx.gfx_ring[i].sched.ready = false; |
2407 | } | 2396 | } |
2408 | udelay(50); | 2397 | udelay(50); |
2409 | } | 2398 | } |
@@ -2613,12 +2602,9 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev) | |||
2613 | 2602 | ||
2614 | /* start the ring */ | 2603 | /* start the ring */ |
2615 | gfx_v7_0_cp_gfx_start(adev); | 2604 | gfx_v7_0_cp_gfx_start(adev); |
2616 | ring->ready = true; | 2605 | r = amdgpu_ring_test_helper(ring); |
2617 | r = amdgpu_ring_test_ring(ring); | 2606 | if (r) |
2618 | if (r) { | ||
2619 | ring->ready = false; | ||
2620 | return r; | 2607 | return r; |
2621 | } | ||
2622 | 2608 | ||
2623 | return 0; | 2609 | return 0; |
2624 | } | 2610 | } |
@@ -2675,7 +2661,7 @@ static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) | |||
2675 | } else { | 2661 | } else { |
2676 | WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); | 2662 | WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); |
2677 | for (i = 0; i < adev->gfx.num_compute_rings; i++) | 2663 | for (i = 0; i < adev->gfx.num_compute_rings; i++) |
2678 | adev->gfx.compute_ring[i].ready = false; | 2664 | adev->gfx.compute_ring[i].sched.ready = false; |
2679 | } | 2665 | } |
2680 | udelay(50); | 2666 | udelay(50); |
2681 | } | 2667 | } |
@@ -2781,7 +2767,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev) | |||
2781 | * GFX7_MEC_HPD_SIZE * 2; | 2767 | * GFX7_MEC_HPD_SIZE * 2; |
2782 | 2768 | ||
2783 | r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, | 2769 | r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, |
2784 | AMDGPU_GEM_DOMAIN_GTT, | 2770 | AMDGPU_GEM_DOMAIN_VRAM, |
2785 | &adev->gfx.mec.hpd_eop_obj, | 2771 | &adev->gfx.mec.hpd_eop_obj, |
2786 | &adev->gfx.mec.hpd_eop_gpu_addr, | 2772 | &adev->gfx.mec.hpd_eop_gpu_addr, |
2787 | (void **)&hpd); | 2773 | (void **)&hpd); |
@@ -3106,10 +3092,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev) | |||
3106 | 3092 | ||
3107 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | 3093 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { |
3108 | ring = &adev->gfx.compute_ring[i]; | 3094 | ring = &adev->gfx.compute_ring[i]; |
3109 | ring->ready = true; | 3095 | amdgpu_ring_test_helper(ring); |
3110 | r = amdgpu_ring_test_ring(ring); | ||
3111 | if (r) | ||
3112 | ring->ready = false; | ||
3113 | } | 3096 | } |
3114 | 3097 | ||
3115 | return 0; | 3098 | return 0; |
@@ -3268,18 +3251,10 @@ static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring, | |||
3268 | * The RLC is a multi-purpose microengine that handles a | 3251 | * The RLC is a multi-purpose microengine that handles a |
3269 | * variety of functions. | 3252 | * variety of functions. |
3270 | */ | 3253 | */ |
3271 | static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev) | ||
3272 | { | ||
3273 | amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL); | ||
3274 | amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL); | ||
3275 | amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL); | ||
3276 | } | ||
3277 | |||
3278 | static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) | 3254 | static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) |
3279 | { | 3255 | { |
3280 | const u32 *src_ptr; | 3256 | const u32 *src_ptr; |
3281 | volatile u32 *dst_ptr; | 3257 | u32 dws; |
3282 | u32 dws, i; | ||
3283 | const struct cs_section_def *cs_data; | 3258 | const struct cs_section_def *cs_data; |
3284 | int r; | 3259 | int r; |
3285 | 3260 | ||
@@ -3306,66 +3281,23 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) | |||
3306 | cs_data = adev->gfx.rlc.cs_data; | 3281 | cs_data = adev->gfx.rlc.cs_data; |
3307 | 3282 | ||
3308 | if (src_ptr) { | 3283 | if (src_ptr) { |
3309 | /* save restore block */ | 3284 | /* init save restore block */ |
3310 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, | 3285 | r = amdgpu_gfx_rlc_init_sr(adev, dws); |
3311 | AMDGPU_GEM_DOMAIN_VRAM, | 3286 | if (r) |
3312 | &adev->gfx.rlc.save_restore_obj, | ||
3313 | &adev->gfx.rlc.save_restore_gpu_addr, | ||
3314 | (void **)&adev->gfx.rlc.sr_ptr); | ||
3315 | if (r) { | ||
3316 | dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r); | ||
3317 | gfx_v7_0_rlc_fini(adev); | ||
3318 | return r; | 3287 | return r; |
3319 | } | ||
3320 | |||
3321 | /* write the sr buffer */ | ||
3322 | dst_ptr = adev->gfx.rlc.sr_ptr; | ||
3323 | for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) | ||
3324 | dst_ptr[i] = cpu_to_le32(src_ptr[i]); | ||
3325 | amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj); | ||
3326 | amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); | ||
3327 | } | 3288 | } |
3328 | 3289 | ||
3329 | if (cs_data) { | 3290 | if (cs_data) { |
3330 | /* clear state block */ | 3291 | /* init clear state block */ |
3331 | adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev); | 3292 | r = amdgpu_gfx_rlc_init_csb(adev); |
3332 | 3293 | if (r) | |
3333 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, | ||
3334 | AMDGPU_GEM_DOMAIN_VRAM, | ||
3335 | &adev->gfx.rlc.clear_state_obj, | ||
3336 | &adev->gfx.rlc.clear_state_gpu_addr, | ||
3337 | (void **)&adev->gfx.rlc.cs_ptr); | ||
3338 | if (r) { | ||
3339 | dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); | ||
3340 | gfx_v7_0_rlc_fini(adev); | ||
3341 | return r; | 3294 | return r; |
3342 | } | ||
3343 | |||
3344 | /* set up the cs buffer */ | ||
3345 | dst_ptr = adev->gfx.rlc.cs_ptr; | ||
3346 | gfx_v7_0_get_csb_buffer(adev, dst_ptr); | ||
3347 | amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); | ||
3348 | amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); | ||
3349 | } | 3295 | } |
3350 | 3296 | ||
3351 | if (adev->gfx.rlc.cp_table_size) { | 3297 | if (adev->gfx.rlc.cp_table_size) { |
3352 | 3298 | r = amdgpu_gfx_rlc_init_cpt(adev); | |
3353 | r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, | 3299 | if (r) |
3354 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, | ||
3355 | &adev->gfx.rlc.cp_table_obj, | ||
3356 | &adev->gfx.rlc.cp_table_gpu_addr, | ||
3357 | (void **)&adev->gfx.rlc.cp_table_ptr); | ||
3358 | if (r) { | ||
3359 | dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); | ||
3360 | gfx_v7_0_rlc_fini(adev); | ||
3361 | return r; | 3300 | return r; |
3362 | } | ||
3363 | |||
3364 | gfx_v7_0_init_cp_pg_table(adev); | ||
3365 | |||
3366 | amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); | ||
3367 | amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); | ||
3368 | |||
3369 | } | 3301 | } |
3370 | 3302 | ||
3371 | return 0; | 3303 | return 0; |
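As with the save/restore block, the clear-state buffer setup is hoisted into amdgpu_gfx_rlc_init_csb(). Since the CSB size and contents are ASIC-specific, the shared helper has to call back into the chip code, which is why get_csb_size and get_csb_buffer join the rlc_funcs table below. A sketch assembled from the lines removed above:

    /* amdgpu_rlc.c (sketch assembled from the code removed above) */
    int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
    {
            volatile u32 *dst_ptr;
            u32 dws;
            int r;

            /* allocate the clear state block */
            adev->gfx.rlc.clear_state_size = dws =
                    adev->gfx.rlc.funcs->get_csb_size(adev);
            r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
                                          AMDGPU_GEM_DOMAIN_VRAM,
                                          &adev->gfx.rlc.clear_state_obj,
                                          &adev->gfx.rlc.clear_state_gpu_addr,
                                          (void **)&adev->gfx.rlc.cs_ptr);
            if (r) {
                    dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
                    amdgpu_gfx_rlc_fini(adev);
                    return r;
            }

            /* let the ASIC code fill in the clear state contents */
            dst_ptr = adev->gfx.rlc.cs_ptr;
            adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
            amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
            amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

            return 0;
    }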
@@ -3446,7 +3378,12 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev) | |||
3446 | return orig; | 3378 | return orig; |
3447 | } | 3379 | } |
3448 | 3380 | ||
3449 | static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) | 3381 | static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev) |
3382 | { | ||
3383 | return true; | ||
3384 | } | ||
3385 | |||
3386 | static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev) | ||
3450 | { | 3387 | { |
3451 | u32 tmp, i, mask; | 3388 | u32 tmp, i, mask; |
3452 | 3389 | ||
@@ -3468,7 +3405,7 @@ static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) | |||
3468 | } | 3405 | } |
3469 | } | 3406 | } |
3470 | 3407 | ||
3471 | static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev) | 3408 | static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev) |
3472 | { | 3409 | { |
3473 | u32 tmp; | 3410 | u32 tmp; |
3474 | 3411 | ||
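The single enter/exit safe-mode pair is split three ways here: is_rlc_enabled reports whether the RLC is running at all, while set_safe_mode/unset_safe_mode do the actual register dance. The gfx_v8 hw_fini hunk further down then calls common amdgpu_gfx_rlc_enter_safe_mode()/..._exit_safe_mode() wrappers. Those wrappers are not in this diff; presumably they centralize the re-entrancy and is-RLC-running checks each ASIC used to duplicate, along the lines of:

    /* amdgpu_rlc.c (assumed wrapper; body not shown in this diff) */
    void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
    {
            if (adev->gfx.rlc.in_safe_mode)
                    return;

            /* if the RLC is not running there is nothing to protect */
            if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
                    return;

            adev->gfx.rlc.funcs->set_safe_mode(adev);
            adev->gfx.rlc.in_safe_mode = true;
    }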
@@ -3545,13 +3482,13 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) | |||
3545 | adev->gfx.rlc_feature_version = le32_to_cpu( | 3482 | adev->gfx.rlc_feature_version = le32_to_cpu( |
3546 | hdr->ucode_feature_version); | 3483 | hdr->ucode_feature_version); |
3547 | 3484 | ||
3548 | gfx_v7_0_rlc_stop(adev); | 3485 | adev->gfx.rlc.funcs->stop(adev); |
3549 | 3486 | ||
3550 | /* disable CG */ | 3487 | /* disable CG */ |
3551 | tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc; | 3488 | tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc; |
3552 | WREG32(mmRLC_CGCG_CGLS_CTRL, tmp); | 3489 | WREG32(mmRLC_CGCG_CGLS_CTRL, tmp); |
3553 | 3490 | ||
3554 | gfx_v7_0_rlc_reset(adev); | 3491 | adev->gfx.rlc.funcs->reset(adev); |
3555 | 3492 | ||
3556 | gfx_v7_0_init_pg(adev); | 3493 | gfx_v7_0_init_pg(adev); |
3557 | 3494 | ||
@@ -3582,7 +3519,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) | |||
3582 | if (adev->asic_type == CHIP_BONAIRE) | 3519 | if (adev->asic_type == CHIP_BONAIRE) |
3583 | WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0); | 3520 | WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0); |
3584 | 3521 | ||
3585 | gfx_v7_0_rlc_start(adev); | 3522 | adev->gfx.rlc.funcs->start(adev); |
3586 | 3523 | ||
3587 | return 0; | 3524 | return 0; |
3588 | } | 3525 | } |
@@ -3784,72 +3721,12 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable) | |||
3784 | WREG32(mmRLC_PG_CNTL, data); | 3721 | WREG32(mmRLC_PG_CNTL, data); |
3785 | } | 3722 | } |
3786 | 3723 | ||
3787 | static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev) | 3724 | static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev) |
3788 | { | 3725 | { |
3789 | const __le32 *fw_data; | ||
3790 | volatile u32 *dst_ptr; | ||
3791 | int me, i, max_me = 4; | ||
3792 | u32 bo_offset = 0; | ||
3793 | u32 table_offset, table_size; | ||
3794 | |||
3795 | if (adev->asic_type == CHIP_KAVERI) | 3726 | if (adev->asic_type == CHIP_KAVERI) |
3796 | max_me = 5; | 3727 | return 5; |
3797 | 3728 | else | |
3798 | if (adev->gfx.rlc.cp_table_ptr == NULL) | 3729 | return 4; |
3799 | return; | ||
3800 | |||
3801 | /* write the cp table buffer */ | ||
3802 | dst_ptr = adev->gfx.rlc.cp_table_ptr; | ||
3803 | for (me = 0; me < max_me; me++) { | ||
3804 | if (me == 0) { | ||
3805 | const struct gfx_firmware_header_v1_0 *hdr = | ||
3806 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; | ||
3807 | fw_data = (const __le32 *) | ||
3808 | (adev->gfx.ce_fw->data + | ||
3809 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
3810 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
3811 | table_size = le32_to_cpu(hdr->jt_size); | ||
3812 | } else if (me == 1) { | ||
3813 | const struct gfx_firmware_header_v1_0 *hdr = | ||
3814 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; | ||
3815 | fw_data = (const __le32 *) | ||
3816 | (adev->gfx.pfp_fw->data + | ||
3817 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
3818 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
3819 | table_size = le32_to_cpu(hdr->jt_size); | ||
3820 | } else if (me == 2) { | ||
3821 | const struct gfx_firmware_header_v1_0 *hdr = | ||
3822 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; | ||
3823 | fw_data = (const __le32 *) | ||
3824 | (adev->gfx.me_fw->data + | ||
3825 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
3826 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
3827 | table_size = le32_to_cpu(hdr->jt_size); | ||
3828 | } else if (me == 3) { | ||
3829 | const struct gfx_firmware_header_v1_0 *hdr = | ||
3830 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; | ||
3831 | fw_data = (const __le32 *) | ||
3832 | (adev->gfx.mec_fw->data + | ||
3833 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
3834 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
3835 | table_size = le32_to_cpu(hdr->jt_size); | ||
3836 | } else { | ||
3837 | const struct gfx_firmware_header_v1_0 *hdr = | ||
3838 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; | ||
3839 | fw_data = (const __le32 *) | ||
3840 | (adev->gfx.mec2_fw->data + | ||
3841 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
3842 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
3843 | table_size = le32_to_cpu(hdr->jt_size); | ||
3844 | } | ||
3845 | |||
3846 | for (i = 0; i < table_size; i ++) { | ||
3847 | dst_ptr[bo_offset + i] = | ||
3848 | cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); | ||
3849 | } | ||
3850 | |||
3851 | bo_offset += table_size; | ||
3852 | } | ||
3853 | } | 3730 | } |
3854 | 3731 | ||
3855 | static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev, | 3732 | static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev, |
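The per-firmware jump-table copy deleted above survives as common code: the ASIC now only reports how many microengines have jump tables (get_cp_table_num, 5 on Kaveri because of MEC2, otherwise 4), and shared code in amdgpu_rlc.c walks the CE/PFP/ME/MEC(/MEC2) firmware headers exactly as the removed loop did. A condensed sketch, assuming the shared helper keeps the removed code's layout:

    /* amdgpu_rlc.c (sketch condensed from the code removed above) */
    static void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
    {
            const struct gfx_firmware_header_v1_0 *hdr;
            volatile u32 *dst_ptr = adev->gfx.rlc.cp_table_ptr;
            const struct firmware *fws[] = {
                    adev->gfx.ce_fw, adev->gfx.pfp_fw, adev->gfx.me_fw,
                    adev->gfx.mec_fw, adev->gfx.mec2_fw,
            };
            int max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);
            u32 bo_offset = 0, table_offset, table_size, i;
            const __le32 *fw_data;
            int me;

            for (me = 0; me < max_me; me++) {
                    hdr = (const struct gfx_firmware_header_v1_0 *)
                            fws[me]->data;
                    fw_data = (const __le32 *)(fws[me]->data +
                            le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                    table_offset = le32_to_cpu(hdr->jt_offset);
                    table_size = le32_to_cpu(hdr->jt_size);

                    /* append this engine's jump table to the shared BO */
                    for (i = 0; i < table_size; i++)
                            dst_ptr[bo_offset + i] = cpu_to_le32(
                                    le32_to_cpu(fw_data[table_offset + i]));
                    bo_offset += table_size;
            }
    }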
@@ -4288,8 +4165,17 @@ static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = { | |||
4288 | }; | 4165 | }; |
4289 | 4166 | ||
4290 | static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { | 4167 | static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { |
4291 | .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode, | 4168 | .is_rlc_enabled = gfx_v7_0_is_rlc_enabled, |
4292 | .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode | 4169 | .set_safe_mode = gfx_v7_0_set_safe_mode, |
4170 | .unset_safe_mode = gfx_v7_0_unset_safe_mode, | ||
4171 | .init = gfx_v7_0_rlc_init, | ||
4172 | .get_csb_size = gfx_v7_0_get_csb_size, | ||
4173 | .get_csb_buffer = gfx_v7_0_get_csb_buffer, | ||
4174 | .get_cp_table_num = gfx_v7_0_cp_pg_table_num, | ||
4175 | .resume = gfx_v7_0_rlc_resume, | ||
4176 | .stop = gfx_v7_0_rlc_stop, | ||
4177 | .reset = gfx_v7_0_rlc_reset, | ||
4178 | .start = gfx_v7_0_rlc_start | ||
4293 | }; | 4179 | }; |
4294 | 4180 | ||
4295 | static int gfx_v7_0_early_init(void *handle) | 4181 | static int gfx_v7_0_early_init(void *handle) |
@@ -4540,7 +4426,7 @@ static int gfx_v7_0_sw_init(void *handle) | |||
4540 | return r; | 4426 | return r; |
4541 | } | 4427 | } |
4542 | 4428 | ||
4543 | r = gfx_v7_0_rlc_init(adev); | 4429 | r = adev->gfx.rlc.funcs->init(adev); |
4544 | if (r) { | 4430 | if (r) { |
4545 | DRM_ERROR("Failed to init rlc BOs!\n"); | 4431 | DRM_ERROR("Failed to init rlc BOs!\n"); |
4546 | return r; | 4432 | return r; |
@@ -4604,7 +4490,7 @@ static int gfx_v7_0_sw_fini(void *handle) | |||
4604 | amdgpu_ring_fini(&adev->gfx.compute_ring[i]); | 4490 | amdgpu_ring_fini(&adev->gfx.compute_ring[i]); |
4605 | 4491 | ||
4606 | gfx_v7_0_cp_compute_fini(adev); | 4492 | gfx_v7_0_cp_compute_fini(adev); |
4607 | gfx_v7_0_rlc_fini(adev); | 4493 | amdgpu_gfx_rlc_fini(adev); |
4608 | gfx_v7_0_mec_fini(adev); | 4494 | gfx_v7_0_mec_fini(adev); |
4609 | amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, | 4495 | amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, |
4610 | &adev->gfx.rlc.clear_state_gpu_addr, | 4496 | &adev->gfx.rlc.clear_state_gpu_addr, |
@@ -4627,7 +4513,7 @@ static int gfx_v7_0_hw_init(void *handle) | |||
4627 | gfx_v7_0_constants_init(adev); | 4513 | gfx_v7_0_constants_init(adev); |
4628 | 4514 | ||
4629 | /* init rlc */ | 4515 | /* init rlc */ |
4630 | r = gfx_v7_0_rlc_resume(adev); | 4516 | r = adev->gfx.rlc.funcs->resume(adev); |
4631 | if (r) | 4517 | if (r) |
4632 | return r; | 4518 | return r; |
4633 | 4519 | ||
@@ -4645,7 +4531,7 @@ static int gfx_v7_0_hw_fini(void *handle) | |||
4645 | amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); | 4531 | amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); |
4646 | amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); | 4532 | amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); |
4647 | gfx_v7_0_cp_enable(adev, false); | 4533 | gfx_v7_0_cp_enable(adev, false); |
4648 | gfx_v7_0_rlc_stop(adev); | 4534 | adev->gfx.rlc.funcs->stop(adev); |
4649 | gfx_v7_0_fini_pg(adev); | 4535 | gfx_v7_0_fini_pg(adev); |
4650 | 4536 | ||
4651 | return 0; | 4537 | return 0; |
@@ -4730,7 +4616,7 @@ static int gfx_v7_0_soft_reset(void *handle) | |||
4730 | gfx_v7_0_update_cg(adev, false); | 4616 | gfx_v7_0_update_cg(adev, false); |
4731 | 4617 | ||
4732 | /* stop the rlc */ | 4618 | /* stop the rlc */ |
4733 | gfx_v7_0_rlc_stop(adev); | 4619 | adev->gfx.rlc.funcs->stop(adev); |
4734 | 4620 | ||
4735 | /* Disable GFX parsing/prefetching */ | 4621 | /* Disable GFX parsing/prefetching */ |
4736 | WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK); | 4622 | WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK); |
@@ -4959,12 +4845,36 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev, | |||
4959 | return 0; | 4845 | return 0; |
4960 | } | 4846 | } |
4961 | 4847 | ||
4848 | static void gfx_v7_0_fault(struct amdgpu_device *adev, | ||
4849 | struct amdgpu_iv_entry *entry) | ||
4850 | { | ||
4851 | struct amdgpu_ring *ring; | ||
4852 | u8 me_id, pipe_id; | ||
4853 | int i; | ||
4854 | |||
4855 | me_id = (entry->ring_id & 0x0c) >> 2; | ||
4856 | pipe_id = (entry->ring_id & 0x03) >> 0; | ||
4857 | switch (me_id) { | ||
4858 | case 0: | ||
4859 | drm_sched_fault(&adev->gfx.gfx_ring[0].sched); | ||
4860 | break; | ||
4861 | case 1: | ||
4862 | case 2: | ||
4863 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | ||
4864 | ring = &adev->gfx.compute_ring[i]; | ||
4865 | if ((ring->me == me_id) && (ring->pipe == pipe_id)) | ||
4866 | drm_sched_fault(&ring->sched); | ||
4867 | } | ||
4868 | break; | ||
4869 | } | ||
4870 | } | ||
4871 | |||
4962 | static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev, | 4872 | static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev, |
4963 | struct amdgpu_irq_src *source, | 4873 | struct amdgpu_irq_src *source, |
4964 | struct amdgpu_iv_entry *entry) | 4874 | struct amdgpu_iv_entry *entry) |
4965 | { | 4875 | { |
4966 | DRM_ERROR("Illegal register access in command stream\n"); | 4876 | DRM_ERROR("Illegal register access in command stream\n"); |
4967 | schedule_work(&adev->reset_work); | 4877 | gfx_v7_0_fault(adev, entry); |
4968 | return 0; | 4878 | return 0; |
4969 | } | 4879 | } |
4970 | 4880 | ||
@@ -4974,7 +4884,7 @@ static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev, | |||
4974 | { | 4884 | { |
4975 | DRM_ERROR("Illegal instruction in command stream\n"); | 4885 | DRM_ERROR("Illegal instruction in command stream\n"); |
4976 | // XXX soft reset the gfx block only | 4886 | // XXX soft reset the gfx block only |
4977 | schedule_work(&adev->reset_work); | 4887 | gfx_v7_0_fault(adev, entry); |
4978 | return 0; | 4888 | return 0; |
4979 | } | 4889 | } |
4980 | 4890 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 617b0c8908a3..cb066a8dccd7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
@@ -54,7 +54,7 @@ | |||
54 | #include "ivsrcid/ivsrcid_vislands30.h" | 54 | #include "ivsrcid/ivsrcid_vislands30.h" |
55 | 55 | ||
56 | #define GFX8_NUM_GFX_RINGS 1 | 56 | #define GFX8_NUM_GFX_RINGS 1 |
57 | #define GFX8_MEC_HPD_SIZE 2048 | 57 | #define GFX8_MEC_HPD_SIZE 4096 |
58 | 58 | ||
59 | #define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001 | 59 | #define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001 |
60 | #define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001 | 60 | #define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001 |
@@ -839,18 +839,14 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) | |||
839 | int r; | 839 | int r; |
840 | 840 | ||
841 | r = amdgpu_gfx_scratch_get(adev, &scratch); | 841 | r = amdgpu_gfx_scratch_get(adev, &scratch); |
842 | if (r) { | 842 | if (r) |
843 | DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r); | ||
844 | return r; | 843 | return r; |
845 | } | 844 | |
846 | WREG32(scratch, 0xCAFEDEAD); | 845 | WREG32(scratch, 0xCAFEDEAD); |
847 | r = amdgpu_ring_alloc(ring, 3); | 846 | r = amdgpu_ring_alloc(ring, 3); |
848 | if (r) { | 847 | if (r) |
849 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", | 848 | goto error_free_scratch; |
850 | ring->idx, r); | 849 | |
851 | amdgpu_gfx_scratch_free(adev, scratch); | ||
852 | return r; | ||
853 | } | ||
854 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); | 850 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); |
855 | amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); | 851 | amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); |
856 | amdgpu_ring_write(ring, 0xDEADBEEF); | 852 | amdgpu_ring_write(ring, 0xDEADBEEF); |
@@ -862,14 +858,11 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) | |||
862 | break; | 858 | break; |
863 | DRM_UDELAY(1); | 859 | DRM_UDELAY(1); |
864 | } | 860 | } |
865 | if (i < adev->usec_timeout) { | 861 | |
866 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", | 862 | if (i >= adev->usec_timeout) |
867 | ring->idx, i); | 863 | r = -ETIMEDOUT; |
868 | } else { | 864 | |
869 | DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n", | 865 | error_free_scratch: |
870 | ring->idx, scratch, tmp); | ||
871 | r = -EINVAL; | ||
872 | } | ||
873 | amdgpu_gfx_scratch_free(adev, scratch); | 866 | amdgpu_gfx_scratch_free(adev, scratch); |
874 | return r; | 867 | return r; |
875 | } | 868 | } |
@@ -886,19 +879,16 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
886 | long r; | 879 | long r; |
887 | 880 | ||
888 | r = amdgpu_device_wb_get(adev, &index); | 881 | r = amdgpu_device_wb_get(adev, &index); |
889 | if (r) { | 882 | if (r) |
890 | dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); | ||
891 | return r; | 883 | return r; |
892 | } | ||
893 | 884 | ||
894 | gpu_addr = adev->wb.gpu_addr + (index * 4); | 885 | gpu_addr = adev->wb.gpu_addr + (index * 4); |
895 | adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); | 886 | adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); |
896 | memset(&ib, 0, sizeof(ib)); | 887 | memset(&ib, 0, sizeof(ib)); |
897 | r = amdgpu_ib_get(adev, NULL, 16, &ib); | 888 | r = amdgpu_ib_get(adev, NULL, 16, &ib); |
898 | if (r) { | 889 | if (r) |
899 | DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); | ||
900 | goto err1; | 890 | goto err1; |
901 | } | 891 | |
902 | ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); | 892 | ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); |
903 | ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; | 893 | ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; |
904 | ib.ptr[2] = lower_32_bits(gpu_addr); | 894 | ib.ptr[2] = lower_32_bits(gpu_addr); |
@@ -912,22 +902,17 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
912 | 902 | ||
913 | r = dma_fence_wait_timeout(f, false, timeout); | 903 | r = dma_fence_wait_timeout(f, false, timeout); |
914 | if (r == 0) { | 904 | if (r == 0) { |
915 | DRM_ERROR("amdgpu: IB test timed out.\n"); | ||
916 | r = -ETIMEDOUT; | 905 | r = -ETIMEDOUT; |
917 | goto err2; | 906 | goto err2; |
918 | } else if (r < 0) { | 907 | } else if (r < 0) { |
919 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); | ||
920 | goto err2; | 908 | goto err2; |
921 | } | 909 | } |
922 | 910 | ||
923 | tmp = adev->wb.wb[index]; | 911 | tmp = adev->wb.wb[index]; |
924 | if (tmp == 0xDEADBEEF) { | 912 | if (tmp == 0xDEADBEEF) |
925 | DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); | ||
926 | r = 0; | 913 | r = 0; |
927 | } else { | 914 | else |
928 | DRM_ERROR("ib test on ring %d failed\n", ring->idx); | ||
929 | r = -EINVAL; | 915 | r = -EINVAL; |
930 | } | ||
931 | 916 | ||
932 | err2: | 917 | err2: |
933 | amdgpu_ib_free(adev, &ib, NULL); | 918 | amdgpu_ib_free(adev, &ib, NULL); |
@@ -1298,81 +1283,16 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, | |||
1298 | buffer[count++] = cpu_to_le32(0); | 1283 | buffer[count++] = cpu_to_le32(0); |
1299 | } | 1284 | } |
1300 | 1285 | ||
1301 | static void cz_init_cp_jump_table(struct amdgpu_device *adev) | 1286 | static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev) |
1302 | { | 1287 | { |
1303 | const __le32 *fw_data; | ||
1304 | volatile u32 *dst_ptr; | ||
1305 | int me, i, max_me = 4; | ||
1306 | u32 bo_offset = 0; | ||
1307 | u32 table_offset, table_size; | ||
1308 | |||
1309 | if (adev->asic_type == CHIP_CARRIZO) | 1288 | if (adev->asic_type == CHIP_CARRIZO) |
1310 | max_me = 5; | 1289 | return 5; |
1311 | 1290 | else | |
1312 | /* write the cp table buffer */ | 1291 | return 4; |
1313 | dst_ptr = adev->gfx.rlc.cp_table_ptr; | ||
1314 | for (me = 0; me < max_me; me++) { | ||
1315 | if (me == 0) { | ||
1316 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1317 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; | ||
1318 | fw_data = (const __le32 *) | ||
1319 | (adev->gfx.ce_fw->data + | ||
1320 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1321 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1322 | table_size = le32_to_cpu(hdr->jt_size); | ||
1323 | } else if (me == 1) { | ||
1324 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1325 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; | ||
1326 | fw_data = (const __le32 *) | ||
1327 | (adev->gfx.pfp_fw->data + | ||
1328 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1329 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1330 | table_size = le32_to_cpu(hdr->jt_size); | ||
1331 | } else if (me == 2) { | ||
1332 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1333 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; | ||
1334 | fw_data = (const __le32 *) | ||
1335 | (adev->gfx.me_fw->data + | ||
1336 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1337 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1338 | table_size = le32_to_cpu(hdr->jt_size); | ||
1339 | } else if (me == 3) { | ||
1340 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1341 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; | ||
1342 | fw_data = (const __le32 *) | ||
1343 | (adev->gfx.mec_fw->data + | ||
1344 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1345 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1346 | table_size = le32_to_cpu(hdr->jt_size); | ||
1347 | } else if (me == 4) { | ||
1348 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1349 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; | ||
1350 | fw_data = (const __le32 *) | ||
1351 | (adev->gfx.mec2_fw->data + | ||
1352 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1353 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1354 | table_size = le32_to_cpu(hdr->jt_size); | ||
1355 | } | ||
1356 | |||
1357 | for (i = 0; i < table_size; i ++) { | ||
1358 | dst_ptr[bo_offset + i] = | ||
1359 | cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); | ||
1360 | } | ||
1361 | |||
1362 | bo_offset += table_size; | ||
1363 | } | ||
1364 | } | ||
1365 | |||
1366 | static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) | ||
1367 | { | ||
1368 | amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL); | ||
1369 | amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL); | ||
1370 | } | 1292 | } |
1371 | 1293 | ||
1372 | static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) | 1294 | static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) |
1373 | { | 1295 | { |
1374 | volatile u32 *dst_ptr; | ||
1375 | u32 dws; | ||
1376 | const struct cs_section_def *cs_data; | 1296 | const struct cs_section_def *cs_data; |
1377 | int r; | 1297 | int r; |
1378 | 1298 | ||
@@ -1381,44 +1301,18 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) | |||
1381 | cs_data = adev->gfx.rlc.cs_data; | 1301 | cs_data = adev->gfx.rlc.cs_data; |
1382 | 1302 | ||
1383 | if (cs_data) { | 1303 | if (cs_data) { |
1384 | /* clear state block */ | 1304 | /* init clear state block */ |
1385 | adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev); | 1305 | r = amdgpu_gfx_rlc_init_csb(adev); |
1386 | 1306 | if (r) | |
1387 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, | ||
1388 | AMDGPU_GEM_DOMAIN_VRAM, | ||
1389 | &adev->gfx.rlc.clear_state_obj, | ||
1390 | &adev->gfx.rlc.clear_state_gpu_addr, | ||
1391 | (void **)&adev->gfx.rlc.cs_ptr); | ||
1392 | if (r) { | ||
1393 | dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); | ||
1394 | gfx_v8_0_rlc_fini(adev); | ||
1395 | return r; | 1307 | return r; |
1396 | } | ||
1397 | |||
1398 | /* set up the cs buffer */ | ||
1399 | dst_ptr = adev->gfx.rlc.cs_ptr; | ||
1400 | gfx_v8_0_get_csb_buffer(adev, dst_ptr); | ||
1401 | amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); | ||
1402 | amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); | ||
1403 | } | 1308 | } |
1404 | 1309 | ||
1405 | if ((adev->asic_type == CHIP_CARRIZO) || | 1310 | if ((adev->asic_type == CHIP_CARRIZO) || |
1406 | (adev->asic_type == CHIP_STONEY)) { | 1311 | (adev->asic_type == CHIP_STONEY)) { |
1407 | adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ | 1312 | adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ |
1408 | r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, | 1313 | r = amdgpu_gfx_rlc_init_cpt(adev); |
1409 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, | 1314 | if (r) |
1410 | &adev->gfx.rlc.cp_table_obj, | ||
1411 | &adev->gfx.rlc.cp_table_gpu_addr, | ||
1412 | (void **)&adev->gfx.rlc.cp_table_ptr); | ||
1413 | if (r) { | ||
1414 | dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); | ||
1415 | return r; | 1315 | return r; |
1416 | } | ||
1417 | |||
1418 | cz_init_cp_jump_table(adev); | ||
1419 | |||
1420 | amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); | ||
1421 | amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); | ||
1422 | } | 1316 | } |
1423 | 1317 | ||
1424 | return 0; | 1318 | return 0; |
@@ -1443,7 +1337,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) | |||
1443 | mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE; | 1337 | mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE; |
1444 | 1338 | ||
1445 | r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, | 1339 | r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, |
1446 | AMDGPU_GEM_DOMAIN_GTT, | 1340 | AMDGPU_GEM_DOMAIN_VRAM, |
1447 | &adev->gfx.mec.hpd_eop_obj, | 1341 | &adev->gfx.mec.hpd_eop_obj, |
1448 | &adev->gfx.mec.hpd_eop_gpu_addr, | 1342 | &adev->gfx.mec.hpd_eop_gpu_addr, |
1449 | (void **)&hpd); | 1343 | (void **)&hpd); |
@@ -1629,7 +1523,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) | |||
1629 | return 0; | 1523 | return 0; |
1630 | 1524 | ||
1631 | /* bail if the compute ring is not ready */ | 1525 | /* bail if the compute ring is not ready */ |
1632 | if (!ring->ready) | 1526 | if (!ring->sched.ready) |
1633 | return 0; | 1527 | return 0; |
1634 | 1528 | ||
1635 | tmp = RREG32(mmGB_EDC_MODE); | 1529 | tmp = RREG32(mmGB_EDC_MODE); |
@@ -2088,7 +1982,7 @@ static int gfx_v8_0_sw_init(void *handle) | |||
2088 | return r; | 1982 | return r; |
2089 | } | 1983 | } |
2090 | 1984 | ||
2091 | r = gfx_v8_0_rlc_init(adev); | 1985 | r = adev->gfx.rlc.funcs->init(adev); |
2092 | if (r) { | 1986 | if (r) { |
2093 | DRM_ERROR("Failed to init rlc BOs!\n"); | 1987 | DRM_ERROR("Failed to init rlc BOs!\n"); |
2094 | return r; | 1988 | return r; |
@@ -2181,7 +2075,7 @@ static int gfx_v8_0_sw_fini(void *handle) | |||
2181 | amdgpu_gfx_kiq_fini(adev); | 2075 | amdgpu_gfx_kiq_fini(adev); |
2182 | 2076 | ||
2183 | gfx_v8_0_mec_fini(adev); | 2077 | gfx_v8_0_mec_fini(adev); |
2184 | gfx_v8_0_rlc_fini(adev); | 2078 | amdgpu_gfx_rlc_fini(adev); |
2185 | amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, | 2079 | amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, |
2186 | &adev->gfx.rlc.clear_state_gpu_addr, | 2080 | &adev->gfx.rlc.clear_state_gpu_addr, |
2187 | (void **)&adev->gfx.rlc.cs_ptr); | 2081 | (void **)&adev->gfx.rlc.cs_ptr); |
@@ -4175,10 +4069,10 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev) | |||
4175 | 4069 | ||
4176 | static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) | 4070 | static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) |
4177 | { | 4071 | { |
4178 | gfx_v8_0_rlc_stop(adev); | 4072 | adev->gfx.rlc.funcs->stop(adev); |
4179 | gfx_v8_0_rlc_reset(adev); | 4073 | adev->gfx.rlc.funcs->reset(adev); |
4180 | gfx_v8_0_init_pg(adev); | 4074 | gfx_v8_0_init_pg(adev); |
4181 | gfx_v8_0_rlc_start(adev); | 4075 | adev->gfx.rlc.funcs->start(adev); |
4182 | 4076 | ||
4183 | return 0; | 4077 | return 0; |
4184 | } | 4078 | } |
@@ -4197,7 +4091,7 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) | |||
4197 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); | 4091 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); |
4198 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); | 4092 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); |
4199 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) | 4093 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) |
4200 | adev->gfx.gfx_ring[i].ready = false; | 4094 | adev->gfx.gfx_ring[i].sched.ready = false; |
4201 | } | 4095 | } |
4202 | WREG32(mmCP_ME_CNTL, tmp); | 4096 | WREG32(mmCP_ME_CNTL, tmp); |
4203 | udelay(50); | 4097 | udelay(50); |
@@ -4379,10 +4273,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) | |||
4379 | /* start the ring */ | 4273 | /* start the ring */ |
4380 | amdgpu_ring_clear_ring(ring); | 4274 | amdgpu_ring_clear_ring(ring); |
4381 | gfx_v8_0_cp_gfx_start(adev); | 4275 | gfx_v8_0_cp_gfx_start(adev); |
4382 | ring->ready = true; | 4276 | ring->sched.ready = true; |
4383 | r = amdgpu_ring_test_ring(ring); | 4277 | r = amdgpu_ring_test_helper(ring); |
4384 | if (r) | ||
4385 | ring->ready = false; | ||
4386 | 4278 | ||
4387 | return r; | 4279 | return r; |
4388 | } | 4280 | } |
@@ -4396,8 +4288,8 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) | |||
4396 | } else { | 4288 | } else { |
4397 | WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); | 4289 | WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); |
4398 | for (i = 0; i < adev->gfx.num_compute_rings; i++) | 4290 | for (i = 0; i < adev->gfx.num_compute_rings; i++) |
4399 | adev->gfx.compute_ring[i].ready = false; | 4291 | adev->gfx.compute_ring[i].sched.ready = false; |
4400 | adev->gfx.kiq.ring.ready = false; | 4292 | adev->gfx.kiq.ring.sched.ready = false; |
4401 | } | 4293 | } |
4402 | udelay(50); | 4294 | udelay(50); |
4403 | } | 4295 | } |
@@ -4473,11 +4365,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev) | |||
4473 | amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); | 4365 | amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); |
4474 | } | 4366 | } |
4475 | 4367 | ||
4476 | r = amdgpu_ring_test_ring(kiq_ring); | 4368 | r = amdgpu_ring_test_helper(kiq_ring); |
4477 | if (r) { | 4369 | if (r) |
4478 | DRM_ERROR("KCQ enable failed\n"); | 4370 | DRM_ERROR("KCQ enable failed\n"); |
4479 | kiq_ring->ready = false; | ||
4480 | } | ||
4481 | return r; | 4371 | return r; |
4482 | } | 4372 | } |
4483 | 4373 | ||
@@ -4781,7 +4671,7 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev) | |||
4781 | amdgpu_bo_kunmap(ring->mqd_obj); | 4671 | amdgpu_bo_kunmap(ring->mqd_obj); |
4782 | ring->mqd_ptr = NULL; | 4672 | ring->mqd_ptr = NULL; |
4783 | amdgpu_bo_unreserve(ring->mqd_obj); | 4673 | amdgpu_bo_unreserve(ring->mqd_obj); |
4784 | ring->ready = true; | 4674 | ring->sched.ready = true; |
4785 | return 0; | 4675 | return 0; |
4786 | } | 4676 | } |
4787 | 4677 | ||
@@ -4820,10 +4710,7 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev) | |||
4820 | */ | 4710 | */ |
4821 | for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) { | 4711 | for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) { |
4822 | ring = &adev->gfx.compute_ring[i]; | 4712 | ring = &adev->gfx.compute_ring[i]; |
4823 | ring->ready = true; | 4713 | r = amdgpu_ring_test_helper(ring); |
4824 | r = amdgpu_ring_test_ring(ring); | ||
4825 | if (r) | ||
4826 | ring->ready = false; | ||
4827 | } | 4714 | } |
4828 | 4715 | ||
4829 | done: | 4716 | done: |
@@ -4867,7 +4754,7 @@ static int gfx_v8_0_hw_init(void *handle) | |||
4867 | gfx_v8_0_init_golden_registers(adev); | 4754 | gfx_v8_0_init_golden_registers(adev); |
4868 | gfx_v8_0_constants_init(adev); | 4755 | gfx_v8_0_constants_init(adev); |
4869 | 4756 | ||
4870 | r = gfx_v8_0_rlc_resume(adev); | 4757 | r = adev->gfx.rlc.funcs->resume(adev); |
4871 | if (r) | 4758 | if (r) |
4872 | return r; | 4759 | return r; |
4873 | 4760 | ||
@@ -4899,7 +4786,7 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev) | |||
4899 | amdgpu_ring_write(kiq_ring, 0); | 4786 | amdgpu_ring_write(kiq_ring, 0); |
4900 | amdgpu_ring_write(kiq_ring, 0); | 4787 | amdgpu_ring_write(kiq_ring, 0); |
4901 | } | 4788 | } |
4902 | r = amdgpu_ring_test_ring(kiq_ring); | 4789 | r = amdgpu_ring_test_helper(kiq_ring); |
4903 | if (r) | 4790 | if (r) |
4904 | DRM_ERROR("KCQ disable failed\n"); | 4791 | DRM_ERROR("KCQ disable failed\n"); |
4905 | 4792 | ||
@@ -4973,16 +4860,16 @@ static int gfx_v8_0_hw_fini(void *handle) | |||
4973 | pr_debug("For SRIOV client, shouldn't do anything.\n"); | 4860 | pr_debug("For SRIOV client, shouldn't do anything.\n"); |
4974 | return 0; | 4861 | return 0; |
4975 | } | 4862 | } |
4976 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 4863 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
4977 | if (!gfx_v8_0_wait_for_idle(adev)) | 4864 | if (!gfx_v8_0_wait_for_idle(adev)) |
4978 | gfx_v8_0_cp_enable(adev, false); | 4865 | gfx_v8_0_cp_enable(adev, false); |
4979 | else | 4866 | else |
4980 | pr_err("cp is busy, skip halt cp\n"); | 4867 | pr_err("cp is busy, skip halt cp\n"); |
4981 | if (!gfx_v8_0_wait_for_rlc_idle(adev)) | 4868 | if (!gfx_v8_0_wait_for_rlc_idle(adev)) |
4982 | gfx_v8_0_rlc_stop(adev); | 4869 | adev->gfx.rlc.funcs->stop(adev); |
4983 | else | 4870 | else |
4984 | pr_err("rlc is busy, skip halt rlc\n"); | 4871 | pr_err("rlc is busy, skip halt rlc\n"); |
4985 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 4872 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
4986 | return 0; | 4873 | return 0; |
4987 | } | 4874 | } |
4988 | 4875 | ||
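[annotation] The per-ASIC enter/exit helpers give way to generic amdgpu_gfx_rlc_enter_safe_mode()/amdgpu_gfx_rlc_exit_safe_mode() wrappers built from the three new hooks; the cg_flags check and in_safe_mode bookkeeping that iceland_enter_rlc_safe_mode() and friends used to duplicate presumably move there once. A sketch, assuming the wrappers live in the new amdgpu_rlc.c (the exact set of CG flags checked is an assumption; the removed gfx_v9 code also tested AMD_CG_SUPPORT_GFX_3D_CGCG):

    void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
    {
            if (adev->gfx.rlc.in_safe_mode)
                    return;

            /* if RLC is not enabled, do nothing */
            if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
                    return;

            if (adev->cg_flags &
                (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
                    adev->gfx.rlc.funcs->set_safe_mode(adev);
                    adev->gfx.rlc.in_safe_mode = true;
            }
    }

    void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
    {
            if (!adev->gfx.rlc.in_safe_mode)
                    return;

            if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
                    return;

            if (adev->cg_flags &
                (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
                    adev->gfx.rlc.funcs->unset_safe_mode(adev);
                    adev->gfx.rlc.in_safe_mode = false;
            }
    }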
@@ -5071,7 +4958,7 @@ static int gfx_v8_0_pre_soft_reset(void *handle) | |||
5071 | srbm_soft_reset = adev->gfx.srbm_soft_reset; | 4958 | srbm_soft_reset = adev->gfx.srbm_soft_reset; |
5072 | 4959 | ||
5073 | /* stop the rlc */ | 4960 | /* stop the rlc */ |
5074 | gfx_v8_0_rlc_stop(adev); | 4961 | adev->gfx.rlc.funcs->stop(adev); |
5075 | 4962 | ||
5076 | if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || | 4963 | if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) || |
5077 | REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) | 4964 | REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) |
@@ -5197,7 +5084,7 @@ static int gfx_v8_0_post_soft_reset(void *handle) | |||
5197 | REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) | 5084 | REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX)) |
5198 | gfx_v8_0_cp_gfx_resume(adev); | 5085 | gfx_v8_0_cp_gfx_resume(adev); |
5199 | 5086 | ||
5200 | gfx_v8_0_rlc_start(adev); | 5087 | adev->gfx.rlc.funcs->start(adev); |
5201 | 5088 | ||
5202 | return 0; | 5089 | return 0; |
5203 | } | 5090 | } |
@@ -5445,7 +5332,7 @@ static int gfx_v8_0_set_powergating_state(void *handle, | |||
5445 | AMD_PG_SUPPORT_RLC_SMU_HS | | 5332 | AMD_PG_SUPPORT_RLC_SMU_HS | |
5446 | AMD_PG_SUPPORT_CP | | 5333 | AMD_PG_SUPPORT_CP | |
5447 | AMD_PG_SUPPORT_GFX_DMG)) | 5334 | AMD_PG_SUPPORT_GFX_DMG)) |
5448 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 5335 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
5449 | switch (adev->asic_type) { | 5336 | switch (adev->asic_type) { |
5450 | case CHIP_CARRIZO: | 5337 | case CHIP_CARRIZO: |
5451 | case CHIP_STONEY: | 5338 | case CHIP_STONEY: |
@@ -5499,7 +5386,7 @@ static int gfx_v8_0_set_powergating_state(void *handle, | |||
5499 | AMD_PG_SUPPORT_RLC_SMU_HS | | 5386 | AMD_PG_SUPPORT_RLC_SMU_HS | |
5500 | AMD_PG_SUPPORT_CP | | 5387 | AMD_PG_SUPPORT_CP | |
5501 | AMD_PG_SUPPORT_GFX_DMG)) | 5388 | AMD_PG_SUPPORT_GFX_DMG)) |
5502 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 5389 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
5503 | return 0; | 5390 | return 0; |
5504 | } | 5391 | } |
5505 | 5392 | ||
@@ -5593,57 +5480,53 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, | |||
5593 | #define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001 | 5480 | #define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001 |
5594 | #define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e | 5481 | #define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e |
5595 | 5482 | ||
5596 | static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev) | 5483 | static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev) |
5597 | { | 5484 | { |
5598 | u32 data; | 5485 | uint32_t rlc_setting; |
5599 | unsigned i; | ||
5600 | 5486 | ||
5601 | data = RREG32(mmRLC_CNTL); | 5487 | rlc_setting = RREG32(mmRLC_CNTL); |
5602 | if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK)) | 5488 | if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK)) |
5603 | return; | 5489 | return false; |
5604 | 5490 | ||
5605 | if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) { | 5491 | return true; |
5606 | data |= RLC_SAFE_MODE__CMD_MASK; | 5492 | } |
5607 | data &= ~RLC_SAFE_MODE__MESSAGE_MASK; | ||
5608 | data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); | ||
5609 | WREG32(mmRLC_SAFE_MODE, data); | ||
5610 | 5493 | ||
5611 | for (i = 0; i < adev->usec_timeout; i++) { | 5494 | static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev) |
5612 | if ((RREG32(mmRLC_GPM_STAT) & | 5495 | { |
5613 | (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | | 5496 | uint32_t data; |
5614 | RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) == | 5497 | unsigned i; |
5615 | (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | | 5498 | data = RREG32(mmRLC_CNTL); |
5616 | RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) | 5499 | data |= RLC_SAFE_MODE__CMD_MASK; |
5617 | break; | 5500 | data &= ~RLC_SAFE_MODE__MESSAGE_MASK; |
5618 | udelay(1); | 5501 | data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); |
5619 | } | 5502 | WREG32(mmRLC_SAFE_MODE, data); |
5620 | 5503 | ||
5621 | for (i = 0; i < adev->usec_timeout; i++) { | 5504 | /* wait for RLC_SAFE_MODE */ |
5622 | if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) | 5505 | for (i = 0; i < adev->usec_timeout; i++) { |
5623 | break; | 5506 | if ((RREG32(mmRLC_GPM_STAT) & |
5624 | udelay(1); | 5507 | (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | |
5625 | } | 5508 | RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) == |
5626 | adev->gfx.rlc.in_safe_mode = true; | 5509 | (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | |
5510 | RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) | ||
5511 | break; | ||
5512 | udelay(1); | ||
5513 | } | ||
5514 | for (i = 0; i < adev->usec_timeout; i++) { | ||
5515 | if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) | ||
5516 | break; | ||
5517 | udelay(1); | ||
5627 | } | 5518 | } |
5628 | } | 5519 | } |
5629 | 5520 | ||
5630 | static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev) | 5521 | static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev) |
5631 | { | 5522 | { |
5632 | u32 data = 0; | 5523 | uint32_t data; |
5633 | unsigned i; | 5524 | unsigned i; |
5634 | 5525 | ||
5635 | data = RREG32(mmRLC_CNTL); | 5526 | data = RREG32(mmRLC_CNTL); |
5636 | if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK)) | 5527 | data |= RLC_SAFE_MODE__CMD_MASK; |
5637 | return; | 5528 | data &= ~RLC_SAFE_MODE__MESSAGE_MASK; |
5638 | 5529 | WREG32(mmRLC_SAFE_MODE, data); | |
5639 | if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) { | ||
5640 | if (adev->gfx.rlc.in_safe_mode) { | ||
5641 | data |= RLC_SAFE_MODE__CMD_MASK; | ||
5642 | data &= ~RLC_SAFE_MODE__MESSAGE_MASK; | ||
5643 | WREG32(mmRLC_SAFE_MODE, data); | ||
5644 | adev->gfx.rlc.in_safe_mode = false; | ||
5645 | } | ||
5646 | } | ||
5647 | 5530 | ||
5648 | for (i = 0; i < adev->usec_timeout; i++) { | 5531 | for (i = 0; i < adev->usec_timeout; i++) { |
5649 | if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) | 5532 | if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) |
@@ -5653,8 +5536,17 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev) | |||
5653 | } | 5536 | } |
5654 | 5537 | ||
5655 | static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { | 5538 | static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { |
5656 | .enter_safe_mode = iceland_enter_rlc_safe_mode, | 5539 | .is_rlc_enabled = gfx_v8_0_is_rlc_enabled, |
5657 | .exit_safe_mode = iceland_exit_rlc_safe_mode | 5540 | .set_safe_mode = gfx_v8_0_set_safe_mode, |
5541 | .unset_safe_mode = gfx_v8_0_unset_safe_mode, | ||
5542 | .init = gfx_v8_0_rlc_init, | ||
5543 | .get_csb_size = gfx_v8_0_get_csb_size, | ||
5544 | .get_csb_buffer = gfx_v8_0_get_csb_buffer, | ||
5545 | .get_cp_table_num = gfx_v8_0_cp_jump_table_num, | ||
5546 | .resume = gfx_v8_0_rlc_resume, | ||
5547 | .stop = gfx_v8_0_rlc_stop, | ||
5548 | .reset = gfx_v8_0_rlc_reset, | ||
5549 | .start = gfx_v8_0_rlc_start | ||
5658 | }; | 5550 | }; |
5659 | 5551 | ||
5660 | static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, | 5552 | static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, |
@@ -5662,7 +5554,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev | |||
5662 | { | 5554 | { |
5663 | uint32_t temp, data; | 5555 | uint32_t temp, data; |
5664 | 5556 | ||
5665 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 5557 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
5666 | 5558 | ||
5667 | /* It is disabled by HW by default */ | 5559 | /* It is disabled by HW by default */ |
5668 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { | 5560 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { |
@@ -5758,7 +5650,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev | |||
5758 | gfx_v8_0_wait_for_rlc_serdes(adev); | 5650 | gfx_v8_0_wait_for_rlc_serdes(adev); |
5759 | } | 5651 | } |
5760 | 5652 | ||
5761 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 5653 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
5762 | } | 5654 | } |
5763 | 5655 | ||
5764 | static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, | 5656 | static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, |
@@ -5768,7 +5660,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev | |||
5768 | 5660 | ||
5769 | temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL); | 5661 | temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL); |
5770 | 5662 | ||
5771 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 5663 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
5772 | 5664 | ||
5773 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { | 5665 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { |
5774 | temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); | 5666 | temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); |
@@ -5851,7 +5743,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev | |||
5851 | 5743 | ||
5852 | gfx_v8_0_wait_for_rlc_serdes(adev); | 5744 | gfx_v8_0_wait_for_rlc_serdes(adev); |
5853 | 5745 | ||
5854 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 5746 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
5855 | } | 5747 | } |
5856 | static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev, | 5748 | static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev, |
5857 | bool enable) | 5749 | bool enable) |
@@ -6131,9 +6023,11 @@ static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring) | |||
6131 | } | 6023 | } |
6132 | 6024 | ||
6133 | static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, | 6025 | static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, |
6134 | struct amdgpu_ib *ib, | 6026 | struct amdgpu_job *job, |
6135 | unsigned vmid, bool ctx_switch) | 6027 | struct amdgpu_ib *ib, |
6028 | bool ctx_switch) | ||
6136 | { | 6029 | { |
6030 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
6137 | u32 header, control = 0; | 6031 | u32 header, control = 0; |
6138 | 6032 | ||
6139 | if (ib->flags & AMDGPU_IB_FLAG_CE) | 6033 | if (ib->flags & AMDGPU_IB_FLAG_CE) |
@@ -6161,9 +6055,11 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, | |||
6161 | } | 6055 | } |
6162 | 6056 | ||
6163 | static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, | 6057 | static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, |
6058 | struct amdgpu_job *job, | ||
6164 | struct amdgpu_ib *ib, | 6059 | struct amdgpu_ib *ib, |
6165 | unsigned vmid, bool ctx_switch) | 6060 | bool ctx_switch) |
6166 | { | 6061 | { |
6062 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
6167 | u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); | 6063 | u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); |
6168 | 6064 | ||
6169 | amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | 6065 | amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); |
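[annotation] Both emit_ib callbacks now take the amdgpu_job instead of a pre-extracted vmid, so the VMID is derived at emit time and ring/IB tests can simply pass a NULL job. AMDGPU_JOB_GET_VMID presumably reduces to a NULL-safe accessor:

    /* assumption: a NULL job (e.g. from IB tests) maps to VMID 0 */
    #define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)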
@@ -6738,12 +6634,39 @@ static int gfx_v8_0_eop_irq(struct amdgpu_device *adev, | |||
6738 | return 0; | 6634 | return 0; |
6739 | } | 6635 | } |
6740 | 6636 | ||
6637 | static void gfx_v8_0_fault(struct amdgpu_device *adev, | ||
6638 | struct amdgpu_iv_entry *entry) | ||
6639 | { | ||
6640 | u8 me_id, pipe_id, queue_id; | ||
6641 | struct amdgpu_ring *ring; | ||
6642 | int i; | ||
6643 | |||
6644 | me_id = (entry->ring_id & 0x0c) >> 2; | ||
6645 | pipe_id = (entry->ring_id & 0x03) >> 0; | ||
6646 | queue_id = (entry->ring_id & 0x70) >> 4; | ||
6647 | |||
6648 | switch (me_id) { | ||
6649 | case 0: | ||
6650 | drm_sched_fault(&adev->gfx.gfx_ring[0].sched); | ||
6651 | break; | ||
6652 | case 1: | ||
6653 | case 2: | ||
6654 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | ||
6655 | ring = &adev->gfx.compute_ring[i]; | ||
6656 | if (ring->me == me_id && ring->pipe == pipe_id && | ||
6657 | ring->queue == queue_id) | ||
6658 | drm_sched_fault(&ring->sched); | ||
6659 | } | ||
6660 | break; | ||
6661 | } | ||
6662 | } | ||
6663 | |||
6741 | static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev, | 6664 | static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev, |
6742 | struct amdgpu_irq_src *source, | 6665 | struct amdgpu_irq_src *source, |
6743 | struct amdgpu_iv_entry *entry) | 6666 | struct amdgpu_iv_entry *entry) |
6744 | { | 6667 | { |
6745 | DRM_ERROR("Illegal register access in command stream\n"); | 6668 | DRM_ERROR("Illegal register access in command stream\n"); |
6746 | schedule_work(&adev->reset_work); | 6669 | gfx_v8_0_fault(adev, entry); |
6747 | return 0; | 6670 | return 0; |
6748 | } | 6671 | } |
6749 | 6672 | ||
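[annotation] Rather than scheduling a full-device reset_work for every privileged register or instruction fault, the new gfx_v8_0_fault() decodes ME, pipe and queue from the IV entry's ring_id (bits 3:2, 1:0 and 6:4 respectively) and pokes only the scheduler of the offending ring. drm_sched_fault() presumably just fires that scheduler's timeout handling immediately, something like:

    /* sketch: kick the scheduler's timeout (TDR) work with no delay */
    void drm_sched_fault(struct drm_gpu_scheduler *sched)
    {
            mod_delayed_work(system_wq, &sched->work_tdr, 0);
    }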
@@ -6752,7 +6675,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev, | |||
6752 | struct amdgpu_iv_entry *entry) | 6675 | struct amdgpu_iv_entry *entry) |
6753 | { | 6676 | { |
6754 | DRM_ERROR("Illegal instruction in command stream\n"); | 6677 | DRM_ERROR("Illegal instruction in command stream\n"); |
6755 | schedule_work(&adev->reset_work); | 6678 | gfx_v8_0_fault(adev, entry); |
6756 | return 0; | 6679 | return 0; |
6757 | } | 6680 | } |
6758 | 6681 | ||
@@ -6976,10 +6899,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = { | |||
6976 | 17 + /* gfx_v8_0_ring_emit_vm_flush */ | 6899 | 17 + /* gfx_v8_0_ring_emit_vm_flush */ |
6977 | 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */ | 6900 | 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */ |
6978 | .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */ | 6901 | .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */ |
6979 | .emit_ib = gfx_v8_0_ring_emit_ib_compute, | ||
6980 | .emit_fence = gfx_v8_0_ring_emit_fence_kiq, | 6902 | .emit_fence = gfx_v8_0_ring_emit_fence_kiq, |
6981 | .test_ring = gfx_v8_0_ring_test_ring, | 6903 | .test_ring = gfx_v8_0_ring_test_ring, |
6982 | .test_ib = gfx_v8_0_ring_test_ib, | ||
6983 | .insert_nop = amdgpu_ring_insert_nop, | 6904 | .insert_nop = amdgpu_ring_insert_nop, |
6984 | .pad_ib = amdgpu_ring_generic_pad_ib, | 6905 | .pad_ib = amdgpu_ring_generic_pad_ib, |
6985 | .emit_rreg = gfx_v8_0_ring_emit_rreg, | 6906 | .emit_rreg = gfx_v8_0_ring_emit_rreg, |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 6d7baf59d6e1..c27caa144c57 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | |||
@@ -41,7 +41,7 @@ | |||
41 | #include "ivsrcid/gfx/irqsrcs_gfx_9_0.h" | 41 | #include "ivsrcid/gfx/irqsrcs_gfx_9_0.h" |
42 | 42 | ||
43 | #define GFX9_NUM_GFX_RINGS 1 | 43 | #define GFX9_NUM_GFX_RINGS 1 |
44 | #define GFX9_MEC_HPD_SIZE 2048 | 44 | #define GFX9_MEC_HPD_SIZE 4096 |
45 | #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L | 45 | #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L |
46 | #define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L | 46 | #define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L |
47 | 47 | ||
@@ -396,18 +396,14 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring) | |||
396 | int r; | 396 | int r; |
397 | 397 | ||
398 | r = amdgpu_gfx_scratch_get(adev, &scratch); | 398 | r = amdgpu_gfx_scratch_get(adev, &scratch); |
399 | if (r) { | 399 | if (r) |
400 | DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r); | ||
401 | return r; | 400 | return r; |
402 | } | 401 | |
403 | WREG32(scratch, 0xCAFEDEAD); | 402 | WREG32(scratch, 0xCAFEDEAD); |
404 | r = amdgpu_ring_alloc(ring, 3); | 403 | r = amdgpu_ring_alloc(ring, 3); |
405 | if (r) { | 404 | if (r) |
406 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", | 405 | goto error_free_scratch; |
407 | ring->idx, r); | 406 | |
408 | amdgpu_gfx_scratch_free(adev, scratch); | ||
409 | return r; | ||
410 | } | ||
411 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); | 407 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); |
412 | amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); | 408 | amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); |
413 | amdgpu_ring_write(ring, 0xDEADBEEF); | 409 | amdgpu_ring_write(ring, 0xDEADBEEF); |
@@ -419,14 +415,11 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring) | |||
419 | break; | 415 | break; |
420 | DRM_UDELAY(1); | 416 | DRM_UDELAY(1); |
421 | } | 417 | } |
422 | if (i < adev->usec_timeout) { | 418 | |
423 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", | 419 | if (i >= adev->usec_timeout) |
424 | ring->idx, i); | 420 | r = -ETIMEDOUT; |
425 | } else { | 421 | |
426 | DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n", | 422 | error_free_scratch: |
427 | ring->idx, scratch, tmp); | ||
428 | r = -EINVAL; | ||
429 | } | ||
430 | amdgpu_gfx_scratch_free(adev, scratch); | 423 | amdgpu_gfx_scratch_free(adev, scratch); |
431 | return r; | 424 | return r; |
432 | } | 425 | } |
@@ -443,19 +436,16 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
443 | long r; | 436 | long r; |
444 | 437 | ||
445 | r = amdgpu_device_wb_get(adev, &index); | 438 | r = amdgpu_device_wb_get(adev, &index); |
446 | if (r) { | 439 | if (r) |
447 | dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); | ||
448 | return r; | 440 | return r; |
449 | } | ||
450 | 441 | ||
451 | gpu_addr = adev->wb.gpu_addr + (index * 4); | 442 | gpu_addr = adev->wb.gpu_addr + (index * 4); |
452 | adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); | 443 | adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD); |
453 | memset(&ib, 0, sizeof(ib)); | 444 | memset(&ib, 0, sizeof(ib)); |
454 | r = amdgpu_ib_get(adev, NULL, 16, &ib); | 445 | r = amdgpu_ib_get(adev, NULL, 16, &ib); |
455 | if (r) { | 446 | if (r) |
456 | DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); | ||
457 | goto err1; | 447 | goto err1; |
458 | } | 448 | |
459 | ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); | 449 | ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3); |
460 | ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; | 450 | ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; |
461 | ib.ptr[2] = lower_32_bits(gpu_addr); | 451 | ib.ptr[2] = lower_32_bits(gpu_addr); |
@@ -469,22 +459,17 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
469 | 459 | ||
470 | r = dma_fence_wait_timeout(f, false, timeout); | 460 | r = dma_fence_wait_timeout(f, false, timeout); |
471 | if (r == 0) { | 461 | if (r == 0) { |
472 | DRM_ERROR("amdgpu: IB test timed out.\n"); | 462 | r = -ETIMEDOUT; |
473 | r = -ETIMEDOUT; | 463 | goto err2; |
474 | goto err2; | ||
475 | } else if (r < 0) { | 464 | } else if (r < 0) { |
476 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); | 465 | goto err2; |
477 | goto err2; | ||
478 | } | 466 | } |
479 | 467 | ||
480 | tmp = adev->wb.wb[index]; | 468 | tmp = adev->wb.wb[index]; |
481 | if (tmp == 0xDEADBEEF) { | 469 | if (tmp == 0xDEADBEEF) |
482 | DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); | 470 | r = 0; |
483 | r = 0; | 471 | else |
484 | } else { | 472 | r = -EINVAL; |
485 | DRM_ERROR("ib test on ring %d failed\n", ring->idx); | ||
486 | r = -EINVAL; | ||
487 | } | ||
488 | 473 | ||
489 | err2: | 474 | err2: |
490 | amdgpu_ib_free(adev, &ib, NULL); | 475 | amdgpu_ib_free(adev, &ib, NULL); |
@@ -1065,85 +1050,13 @@ static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable) | |||
1065 | WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0); | 1050 | WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0); |
1066 | } | 1051 | } |
1067 | 1052 | ||
1068 | static void rv_init_cp_jump_table(struct amdgpu_device *adev) | 1053 | static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev) |
1069 | { | ||
1070 | const __le32 *fw_data; | ||
1071 | volatile u32 *dst_ptr; | ||
1072 | int me, i, max_me = 5; | ||
1073 | u32 bo_offset = 0; | ||
1074 | u32 table_offset, table_size; | ||
1075 | |||
1076 | /* write the cp table buffer */ | ||
1077 | dst_ptr = adev->gfx.rlc.cp_table_ptr; | ||
1078 | for (me = 0; me < max_me; me++) { | ||
1079 | if (me == 0) { | ||
1080 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1081 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; | ||
1082 | fw_data = (const __le32 *) | ||
1083 | (adev->gfx.ce_fw->data + | ||
1084 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1085 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1086 | table_size = le32_to_cpu(hdr->jt_size); | ||
1087 | } else if (me == 1) { | ||
1088 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1089 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; | ||
1090 | fw_data = (const __le32 *) | ||
1091 | (adev->gfx.pfp_fw->data + | ||
1092 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1093 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1094 | table_size = le32_to_cpu(hdr->jt_size); | ||
1095 | } else if (me == 2) { | ||
1096 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1097 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; | ||
1098 | fw_data = (const __le32 *) | ||
1099 | (adev->gfx.me_fw->data + | ||
1100 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1101 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1102 | table_size = le32_to_cpu(hdr->jt_size); | ||
1103 | } else if (me == 3) { | ||
1104 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1105 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; | ||
1106 | fw_data = (const __le32 *) | ||
1107 | (adev->gfx.mec_fw->data + | ||
1108 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1109 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1110 | table_size = le32_to_cpu(hdr->jt_size); | ||
1111 | } else if (me == 4) { | ||
1112 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1113 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; | ||
1114 | fw_data = (const __le32 *) | ||
1115 | (adev->gfx.mec2_fw->data + | ||
1116 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1117 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1118 | table_size = le32_to_cpu(hdr->jt_size); | ||
1119 | } | ||
1120 | |||
1121 | for (i = 0; i < table_size; i ++) { | ||
1122 | dst_ptr[bo_offset + i] = | ||
1123 | cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); | ||
1124 | } | ||
1125 | |||
1126 | bo_offset += table_size; | ||
1127 | } | ||
1128 | } | ||
1129 | |||
1130 | static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev) | ||
1131 | { | 1054 | { |
1132 | /* clear state block */ | 1055 | return 5; |
1133 | amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, | ||
1134 | &adev->gfx.rlc.clear_state_gpu_addr, | ||
1135 | (void **)&adev->gfx.rlc.cs_ptr); | ||
1136 | |||
1137 | /* jump table block */ | ||
1138 | amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, | ||
1139 | &adev->gfx.rlc.cp_table_gpu_addr, | ||
1140 | (void **)&adev->gfx.rlc.cp_table_ptr); | ||
1141 | } | 1056 | } |
1142 | 1057 | ||
1143 | static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) | 1058 | static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) |
1144 | { | 1059 | { |
1145 | volatile u32 *dst_ptr; | ||
1146 | u32 dws; | ||
1147 | const struct cs_section_def *cs_data; | 1060 | const struct cs_section_def *cs_data; |
1148 | int r; | 1061 | int r; |
1149 | 1062 | ||
@@ -1152,45 +1065,18 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) | |||
1152 | cs_data = adev->gfx.rlc.cs_data; | 1065 | cs_data = adev->gfx.rlc.cs_data; |
1153 | 1066 | ||
1154 | if (cs_data) { | 1067 | if (cs_data) { |
1155 | /* clear state block */ | 1068 | /* init clear state block */ |
1156 | adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev); | 1069 | r = amdgpu_gfx_rlc_init_csb(adev); |
1157 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, | 1070 | if (r) |
1158 | AMDGPU_GEM_DOMAIN_VRAM, | ||
1159 | &adev->gfx.rlc.clear_state_obj, | ||
1160 | &adev->gfx.rlc.clear_state_gpu_addr, | ||
1161 | (void **)&adev->gfx.rlc.cs_ptr); | ||
1162 | if (r) { | ||
1163 | dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", | ||
1164 | r); | ||
1165 | gfx_v9_0_rlc_fini(adev); | ||
1166 | return r; | 1071 | return r; |
1167 | } | ||
1168 | /* set up the cs buffer */ | ||
1169 | dst_ptr = adev->gfx.rlc.cs_ptr; | ||
1170 | gfx_v9_0_get_csb_buffer(adev, dst_ptr); | ||
1171 | amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); | ||
1172 | amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); | ||
1173 | amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); | ||
1174 | } | 1072 | } |
1175 | 1073 | ||
1176 | if (adev->asic_type == CHIP_RAVEN) { | 1074 | if (adev->asic_type == CHIP_RAVEN) { |
1177 | /* TODO: double check the cp_table_size for RV */ | 1075 | /* TODO: double check the cp_table_size for RV */ |
1178 | adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ | 1076 | adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ |
1179 | r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, | 1077 | r = amdgpu_gfx_rlc_init_cpt(adev); |
1180 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, | 1078 | if (r) |
1181 | &adev->gfx.rlc.cp_table_obj, | ||
1182 | &adev->gfx.rlc.cp_table_gpu_addr, | ||
1183 | (void **)&adev->gfx.rlc.cp_table_ptr); | ||
1184 | if (r) { | ||
1185 | dev_err(adev->dev, | ||
1186 | "(%d) failed to create cp table bo\n", r); | ||
1187 | gfx_v9_0_rlc_fini(adev); | ||
1188 | return r; | 1079 | return r; |
1189 | } | ||
1190 | |||
1191 | rv_init_cp_jump_table(adev); | ||
1192 | amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); | ||
1193 | amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); | ||
1194 | } | 1080 | } |
1195 | 1081 | ||
1196 | switch (adev->asic_type) { | 1082 | switch (adev->asic_type) { |
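[annotation] The hand-rolled clear-state and jump-table BO setup is replaced by amdgpu_gfx_rlc_init_csb() and amdgpu_gfx_rlc_init_cpt(), which presumably drive the new get_csb_size/get_csb_buffer/get_cp_table_num hooks; gfx_v9_0_cp_jump_table_num() returning 5 matches the max_me = 5 loop deleted from rv_init_cp_jump_table(). A sketch of the CSB helper, reassembled from the removed code under those assumptions:

    int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
    {
            volatile u32 *dst_ptr;
            u32 dws;
            int r;

            /* allocate the clear state block, sized by the ASIC hook */
            adev->gfx.rlc.clear_state_size = dws =
                    adev->gfx.rlc.funcs->get_csb_size(adev);
            r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
                                          AMDGPU_GEM_DOMAIN_VRAM,
                                          &adev->gfx.rlc.clear_state_obj,
                                          &adev->gfx.rlc.clear_state_gpu_addr,
                                          (void **)&adev->gfx.rlc.cs_ptr);
            if (r) {
                    dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
                    amdgpu_gfx_rlc_fini(adev);
                    return r;
            }

            /* fill it via the ASIC hook, then unmap */
            dst_ptr = adev->gfx.rlc.cs_ptr;
            adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
            amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
            amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
            amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

            return 0;
    }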
@@ -1264,7 +1150,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) | |||
1264 | mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE; | 1150 | mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE; |
1265 | 1151 | ||
1266 | r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, | 1152 | r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, |
1267 | AMDGPU_GEM_DOMAIN_GTT, | 1153 | AMDGPU_GEM_DOMAIN_VRAM, |
1268 | &adev->gfx.mec.hpd_eop_obj, | 1154 | &adev->gfx.mec.hpd_eop_obj, |
1269 | &adev->gfx.mec.hpd_eop_gpu_addr, | 1155 | &adev->gfx.mec.hpd_eop_gpu_addr, |
1270 | (void **)&hpd); | 1156 | (void **)&hpd); |
@@ -1635,8 +1521,8 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev) | |||
1635 | /* Clear GDS reserved memory */ | 1521 | /* Clear GDS reserved memory */ |
1636 | r = amdgpu_ring_alloc(ring, 17); | 1522 | r = amdgpu_ring_alloc(ring, 17); |
1637 | if (r) { | 1523 | if (r) { |
1638 | DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n", | 1524 | DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n", |
1639 | ring->idx, r); | 1525 | ring->name, r); |
1640 | return r; | 1526 | return r; |
1641 | } | 1527 | } |
1642 | 1528 | ||
@@ -1748,7 +1634,7 @@ static int gfx_v9_0_sw_init(void *handle) | |||
1748 | return r; | 1634 | return r; |
1749 | } | 1635 | } |
1750 | 1636 | ||
1751 | r = gfx_v9_0_rlc_init(adev); | 1637 | r = adev->gfx.rlc.funcs->init(adev); |
1752 | if (r) { | 1638 | if (r) { |
1753 | DRM_ERROR("Failed to init rlc BOs!\n"); | 1639 | DRM_ERROR("Failed to init rlc BOs!\n"); |
1754 | return r; | 1640 | return r; |
@@ -2498,12 +2384,12 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) | |||
2498 | return 0; | 2384 | return 0; |
2499 | } | 2385 | } |
2500 | 2386 | ||
2501 | gfx_v9_0_rlc_stop(adev); | 2387 | adev->gfx.rlc.funcs->stop(adev); |
2502 | 2388 | ||
2503 | /* disable CG */ | 2389 | /* disable CG */ |
2504 | WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0); | 2390 | WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0); |
2505 | 2391 | ||
2506 | gfx_v9_0_rlc_reset(adev); | 2392 | adev->gfx.rlc.funcs->reset(adev); |
2507 | 2393 | ||
2508 | gfx_v9_0_init_pg(adev); | 2394 | gfx_v9_0_init_pg(adev); |
2509 | 2395 | ||
@@ -2514,15 +2400,24 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) | |||
2514 | return r; | 2400 | return r; |
2515 | } | 2401 | } |
2516 | 2402 | ||
2517 | if (adev->asic_type == CHIP_RAVEN || | 2403 | switch (adev->asic_type) { |
2518 | adev->asic_type == CHIP_VEGA20) { | 2404 | case CHIP_RAVEN: |
2519 | if (amdgpu_lbpw != 0) | 2405 | if (amdgpu_lbpw == 0) |
2406 | gfx_v9_0_enable_lbpw(adev, false); | ||
2407 | else | ||
2408 | gfx_v9_0_enable_lbpw(adev, true); | ||
2409 | break; | ||
2410 | case CHIP_VEGA20: | ||
2411 | if (amdgpu_lbpw > 0) | ||
2520 | gfx_v9_0_enable_lbpw(adev, true); | 2412 | gfx_v9_0_enable_lbpw(adev, true); |
2521 | else | 2413 | else |
2522 | gfx_v9_0_enable_lbpw(adev, false); | 2414 | gfx_v9_0_enable_lbpw(adev, false); |
2415 | break; | ||
2416 | default: | ||
2417 | break; | ||
2523 | } | 2418 | } |
2524 | 2419 | ||
2525 | gfx_v9_0_rlc_start(adev); | 2420 | adev->gfx.rlc.funcs->start(adev); |
2526 | 2421 | ||
2527 | return 0; | 2422 | return 0; |
2528 | } | 2423 | } |
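[annotation] The combined Raven/Vega20 condition splits so the two ASICs treat the module parameter's auto default differently: Raven enables LBPW for any nonzero value, auto included, while Vega20 requires an explicit positive value. Assuming the usual tri-state parameter definition in amdgpu_drv.c:

    /* assumption: tri-state module parameter, -1 = auto (the default) */
    int amdgpu_lbpw = -1;

    /* Raven:  amdgpu_lbpw != 0  ->  LBPW on  (auto enables it)     */
    /* Vega20: amdgpu_lbpw >  0  ->  LBPW on  (auto leaves it off)  */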
@@ -2537,7 +2432,7 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) | |||
2537 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1); | 2432 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1); |
2538 | if (!enable) { | 2433 | if (!enable) { |
2539 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) | 2434 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) |
2540 | adev->gfx.gfx_ring[i].ready = false; | 2435 | adev->gfx.gfx_ring[i].sched.ready = false; |
2541 | } | 2436 | } |
2542 | WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); | 2437 | WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); |
2543 | udelay(50); | 2438 | udelay(50); |
@@ -2727,7 +2622,7 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev) | |||
2727 | 2622 | ||
2728 | /* start the ring */ | 2623 | /* start the ring */ |
2729 | gfx_v9_0_cp_gfx_start(adev); | 2624 | gfx_v9_0_cp_gfx_start(adev); |
2730 | ring->ready = true; | 2625 | ring->sched.ready = true; |
2731 | 2626 | ||
2732 | return 0; | 2627 | return 0; |
2733 | } | 2628 | } |
@@ -2742,8 +2637,8 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) | |||
2742 | WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, | 2637 | WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, |
2743 | (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); | 2638 | (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); |
2744 | for (i = 0; i < adev->gfx.num_compute_rings; i++) | 2639 | for (i = 0; i < adev->gfx.num_compute_rings; i++) |
2745 | adev->gfx.compute_ring[i].ready = false; | 2640 | adev->gfx.compute_ring[i].sched.ready = false; |
2746 | adev->gfx.kiq.ring.ready = false; | 2641 | adev->gfx.kiq.ring.sched.ready = false; |
2747 | } | 2642 | } |
2748 | udelay(50); | 2643 | udelay(50); |
2749 | } | 2644 | } |
@@ -2866,11 +2761,9 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev) | |||
2866 | amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); | 2761 | amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); |
2867 | } | 2762 | } |
2868 | 2763 | ||
2869 | r = amdgpu_ring_test_ring(kiq_ring); | 2764 | r = amdgpu_ring_test_helper(kiq_ring); |
2870 | if (r) { | 2765 | if (r) |
2871 | DRM_ERROR("KCQ enable failed\n"); | 2766 | DRM_ERROR("KCQ enable failed\n"); |
2872 | kiq_ring->ready = false; | ||
2873 | } | ||
2874 | 2767 | ||
2875 | return r; | 2768 | return r; |
2876 | } | 2769 | } |
@@ -3249,7 +3142,7 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev) | |||
3249 | amdgpu_bo_kunmap(ring->mqd_obj); | 3142 | amdgpu_bo_kunmap(ring->mqd_obj); |
3250 | ring->mqd_ptr = NULL; | 3143 | ring->mqd_ptr = NULL; |
3251 | amdgpu_bo_unreserve(ring->mqd_obj); | 3144 | amdgpu_bo_unreserve(ring->mqd_obj); |
3252 | ring->ready = true; | 3145 | ring->sched.ready = true; |
3253 | return 0; | 3146 | return 0; |
3254 | } | 3147 | } |
3255 | 3148 | ||
@@ -3314,19 +3207,13 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev) | |||
3314 | return r; | 3207 | return r; |
3315 | 3208 | ||
3316 | ring = &adev->gfx.gfx_ring[0]; | 3209 | ring = &adev->gfx.gfx_ring[0]; |
3317 | r = amdgpu_ring_test_ring(ring); | 3210 | r = amdgpu_ring_test_helper(ring); |
3318 | if (r) { | 3211 | if (r) |
3319 | ring->ready = false; | ||
3320 | return r; | 3212 | return r; |
3321 | } | ||
3322 | 3213 | ||
3323 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | 3214 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { |
3324 | ring = &adev->gfx.compute_ring[i]; | 3215 | ring = &adev->gfx.compute_ring[i]; |
3325 | 3216 | amdgpu_ring_test_helper(ring); | |
3326 | ring->ready = true; | ||
3327 | r = amdgpu_ring_test_ring(ring); | ||
3328 | if (r) | ||
3329 | ring->ready = false; | ||
3330 | } | 3217 | } |
3331 | 3218 | ||
3332 | gfx_v9_0_enable_gui_idle_interrupt(adev, true); | 3219 | gfx_v9_0_enable_gui_idle_interrupt(adev, true); |
@@ -3353,7 +3240,7 @@ static int gfx_v9_0_hw_init(void *handle) | |||
3353 | if (r) | 3240 | if (r) |
3354 | return r; | 3241 | return r; |
3355 | 3242 | ||
3356 | r = gfx_v9_0_rlc_resume(adev); | 3243 | r = adev->gfx.rlc.funcs->resume(adev); |
3357 | if (r) | 3244 | if (r) |
3358 | return r; | 3245 | return r; |
3359 | 3246 | ||
@@ -3391,7 +3278,7 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev) | |||
3391 | amdgpu_ring_write(kiq_ring, 0); | 3278 | amdgpu_ring_write(kiq_ring, 0); |
3392 | amdgpu_ring_write(kiq_ring, 0); | 3279 | amdgpu_ring_write(kiq_ring, 0); |
3393 | } | 3280 | } |
3394 | r = amdgpu_ring_test_ring(kiq_ring); | 3281 | r = amdgpu_ring_test_helper(kiq_ring); |
3395 | if (r) | 3282 | if (r) |
3396 | DRM_ERROR("KCQ disable failed\n"); | 3283 | DRM_ERROR("KCQ disable failed\n"); |
3397 | 3284 | ||
@@ -3433,7 +3320,7 @@ static int gfx_v9_0_hw_fini(void *handle) | |||
3433 | } | 3320 | } |
3434 | 3321 | ||
3435 | gfx_v9_0_cp_enable(adev, false); | 3322 | gfx_v9_0_cp_enable(adev, false); |
3436 | gfx_v9_0_rlc_stop(adev); | 3323 | adev->gfx.rlc.funcs->stop(adev); |
3437 | 3324 | ||
3438 | gfx_v9_0_csb_vram_unpin(adev); | 3325 | gfx_v9_0_csb_vram_unpin(adev); |
3439 | 3326 | ||
@@ -3508,7 +3395,7 @@ static int gfx_v9_0_soft_reset(void *handle) | |||
3508 | 3395 | ||
3509 | if (grbm_soft_reset) { | 3396 | if (grbm_soft_reset) { |
3510 | /* stop the rlc */ | 3397 | /* stop the rlc */ |
3511 | gfx_v9_0_rlc_stop(adev); | 3398 | adev->gfx.rlc.funcs->stop(adev); |
3512 | 3399 | ||
3513 | /* Disable GFX parsing/prefetching */ | 3400 | /* Disable GFX parsing/prefetching */ |
3514 | gfx_v9_0_cp_gfx_enable(adev, false); | 3401 | gfx_v9_0_cp_gfx_enable(adev, false); |
@@ -3607,64 +3494,47 @@ static int gfx_v9_0_late_init(void *handle) | |||
3607 | return 0; | 3494 | return 0; |
3608 | } | 3495 | } |
3609 | 3496 | ||
3610 | static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev) | 3497 | static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev) |
3611 | { | 3498 | { |
3612 | uint32_t rlc_setting, data; | 3499 | uint32_t rlc_setting; |
3613 | unsigned i; | ||
3614 | |||
3615 | if (adev->gfx.rlc.in_safe_mode) | ||
3616 | return; | ||
3617 | 3500 | ||
3618 | /* if RLC is not enabled, do nothing */ | 3501 | /* if RLC is not enabled, do nothing */ |
3619 | rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL); | 3502 | rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL); |
3620 | if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK)) | 3503 | if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK)) |
3621 | return; | 3504 | return false; |
3622 | |||
3623 | if (adev->cg_flags & | ||
3624 | (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG | | ||
3625 | AMD_CG_SUPPORT_GFX_3D_CGCG)) { | ||
3626 | data = RLC_SAFE_MODE__CMD_MASK; | ||
3627 | data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); | ||
3628 | WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); | ||
3629 | 3505 | ||
3630 | /* wait for RLC_SAFE_MODE */ | 3506 | return true; |
3631 | for (i = 0; i < adev->usec_timeout; i++) { | ||
3632 | if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) | ||
3633 | break; | ||
3634 | udelay(1); | ||
3635 | } | ||
3636 | adev->gfx.rlc.in_safe_mode = true; | ||
3637 | } | ||
3638 | } | 3507 | } |
3639 | 3508 | ||
3640 | static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev) | 3509 | static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev) |
3641 | { | 3510 | { |
3642 | uint32_t rlc_setting, data; | 3511 | uint32_t data; |
3643 | 3512 | unsigned i; | |
3644 | if (!adev->gfx.rlc.in_safe_mode) | ||
3645 | return; | ||
3646 | 3513 | ||
3647 | /* if RLC is not enabled, do nothing */ | 3514 | data = RLC_SAFE_MODE__CMD_MASK; |
3648 | rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL); | 3515 | data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); |
3649 | if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK)) | 3516 | WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); |
3650 | return; | ||
3651 | 3517 | ||
3652 | if (adev->cg_flags & | 3518 | /* wait for RLC_SAFE_MODE */ |
3653 | (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) { | 3519 | for (i = 0; i < adev->usec_timeout; i++) { |
3654 | /* | 3520 | if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) |
3655 | * Try to exit safe mode only if it is already in safe | 3521 | break; |
3656 | * mode. | 3522 | udelay(1); |
3657 | */ | ||
3658 | data = RLC_SAFE_MODE__CMD_MASK; | ||
3659 | WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); | ||
3660 | adev->gfx.rlc.in_safe_mode = false; | ||
3661 | } | 3523 | } |
3662 | } | 3524 | } |
3663 | 3525 | ||
3526 | static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev) | ||
3527 | { | ||
3528 | uint32_t data; | ||
3529 | |||
3530 | data = RLC_SAFE_MODE__CMD_MASK; | ||
3531 | WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); | ||
3532 | } | ||
3533 | |||
3664 | static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, | 3534 | static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, |
3665 | bool enable) | 3535 | bool enable) |
3666 | { | 3536 | { |
3667 | gfx_v9_0_enter_rlc_safe_mode(adev); | 3537 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
3668 | 3538 | ||
3669 | if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) { | 3539 | if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) { |
3670 | gfx_v9_0_enable_gfx_cg_power_gating(adev, true); | 3540 | gfx_v9_0_enable_gfx_cg_power_gating(adev, true); |
@@ -3675,7 +3545,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, | |||
3675 | gfx_v9_0_enable_gfx_pipeline_powergating(adev, false); | 3545 | gfx_v9_0_enable_gfx_pipeline_powergating(adev, false); |
3676 | } | 3546 | } |
3677 | 3547 | ||
3678 | gfx_v9_0_exit_rlc_safe_mode(adev); | 3548 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
3679 | } | 3549 | } |
3680 | 3550 | ||
3681 | static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev, | 3551 | static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev, |
@@ -3773,7 +3643,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev, | |||
3773 | { | 3643 | { |
3774 | uint32_t data, def; | 3644 | uint32_t data, def; |
3775 | 3645 | ||
3776 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 3646 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
3777 | 3647 | ||
3778 | /* Enable 3D CGCG/CGLS */ | 3648 | /* Enable 3D CGCG/CGLS */ |
3779 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) { | 3649 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) { |
@@ -3813,7 +3683,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev, | |||
3813 | WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data); | 3683 | WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data); |
3814 | } | 3684 | } |
3815 | 3685 | ||
3816 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 3686 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
3817 | } | 3687 | } |
3818 | 3688 | ||
3819 | static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, | 3689 | static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, |
@@ -3821,7 +3691,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev | |||
3821 | { | 3691 | { |
3822 | uint32_t def, data; | 3692 | uint32_t def, data; |
3823 | 3693 | ||
3824 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 3694 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
3825 | 3695 | ||
3826 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { | 3696 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { |
3827 | def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); | 3697 | def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); |
@@ -3861,7 +3731,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev | |||
3861 | WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data); | 3731 | WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data); |
3862 | } | 3732 | } |
3863 | 3733 | ||
3864 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 3734 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
3865 | } | 3735 | } |
3866 | 3736 | ||
3867 | static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, | 3737 | static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, |
@@ -3890,8 +3760,17 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, | |||
3890 | } | 3760 | } |
3891 | 3761 | ||
3892 | static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { | 3762 | static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { |
3893 | .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode, | 3763 | .is_rlc_enabled = gfx_v9_0_is_rlc_enabled, |
3894 | .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode | 3764 | .set_safe_mode = gfx_v9_0_set_safe_mode, |
3765 | .unset_safe_mode = gfx_v9_0_unset_safe_mode, | ||
3766 | .init = gfx_v9_0_rlc_init, | ||
3767 | .get_csb_size = gfx_v9_0_get_csb_size, | ||
3768 | .get_csb_buffer = gfx_v9_0_get_csb_buffer, | ||
3769 | .get_cp_table_num = gfx_v9_0_cp_jump_table_num, | ||
3770 | .resume = gfx_v9_0_rlc_resume, | ||
3771 | .stop = gfx_v9_0_rlc_stop, | ||
3772 | .reset = gfx_v9_0_rlc_reset, | ||
3773 | .start = gfx_v9_0_rlc_start | ||
3895 | }; | 3774 | }; |
3896 | 3775 | ||
3897 | static int gfx_v9_0_set_powergating_state(void *handle, | 3776 | static int gfx_v9_0_set_powergating_state(void *handle, |
@@ -4072,9 +3951,11 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) | |||
4072 | } | 3951 | } |
4073 | 3952 | ||
4074 | static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, | 3953 | static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, |
4075 | struct amdgpu_ib *ib, | 3954 | struct amdgpu_job *job, |
4076 | unsigned vmid, bool ctx_switch) | 3955 | struct amdgpu_ib *ib, |
3956 | bool ctx_switch) | ||
4077 | { | 3957 | { |
3958 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
4078 | u32 header, control = 0; | 3959 | u32 header, control = 0; |
4079 | 3960 | ||
4080 | if (ib->flags & AMDGPU_IB_FLAG_CE) | 3961 | if (ib->flags & AMDGPU_IB_FLAG_CE) |
@@ -4103,20 +3984,22 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, | |||
4103 | } | 3984 | } |
4104 | 3985 | ||
4105 | static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring, | 3986 | static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring, |
4106 | struct amdgpu_ib *ib, | 3987 | struct amdgpu_job *job, |
4107 | unsigned vmid, bool ctx_switch) | 3988 | struct amdgpu_ib *ib, |
3989 | bool ctx_switch) | ||
4108 | { | 3990 | { |
4109 | u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); | 3991 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); |
3992 | u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); | ||
4110 | 3993 | ||
4111 | amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | 3994 | amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); |
4112 | BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ | 3995 | BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ |
4113 | amdgpu_ring_write(ring, | 3996 | amdgpu_ring_write(ring, |
4114 | #ifdef __BIG_ENDIAN | 3997 | #ifdef __BIG_ENDIAN |
4115 | (2 << 0) | | 3998 | (2 << 0) | |
4116 | #endif | 3999 | #endif |
4117 | lower_32_bits(ib->gpu_addr)); | 4000 | lower_32_bits(ib->gpu_addr)); |
4118 | amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); | 4001 | amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); |
4119 | amdgpu_ring_write(ring, control); | 4002 | amdgpu_ring_write(ring, control); |
4120 | } | 4003 | } |
4121 | 4004 | ||
4122 | static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, | 4005 | static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, |
@@ -4695,12 +4578,39 @@ static int gfx_v9_0_eop_irq(struct amdgpu_device *adev, | |||
4695 | return 0; | 4578 | return 0; |
4696 | } | 4579 | } |
4697 | 4580 | ||
4581 | static void gfx_v9_0_fault(struct amdgpu_device *adev, | ||
4582 | struct amdgpu_iv_entry *entry) | ||
4583 | { | ||
4584 | u8 me_id, pipe_id, queue_id; | ||
4585 | struct amdgpu_ring *ring; | ||
4586 | int i; | ||
4587 | |||
4588 | me_id = (entry->ring_id & 0x0c) >> 2; | ||
4589 | pipe_id = (entry->ring_id & 0x03) >> 0; | ||
4590 | queue_id = (entry->ring_id & 0x70) >> 4; | ||
4591 | |||
4592 | switch (me_id) { | ||
4593 | case 0: | ||
4594 | drm_sched_fault(&adev->gfx.gfx_ring[0].sched); | ||
4595 | break; | ||
4596 | case 1: | ||
4597 | case 2: | ||
4598 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | ||
4599 | ring = &adev->gfx.compute_ring[i]; | ||
4600 | if (ring->me == me_id && ring->pipe == pipe_id && | ||
4601 | ring->queue == queue_id) | ||
4602 | drm_sched_fault(&ring->sched); | ||
4603 | } | ||
4604 | break; | ||
4605 | } | ||
4606 | } | ||
4607 | |||
4698 | static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev, | 4608 | static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev, |
4699 | struct amdgpu_irq_src *source, | 4609 | struct amdgpu_irq_src *source, |
4700 | struct amdgpu_iv_entry *entry) | 4610 | struct amdgpu_iv_entry *entry) |
4701 | { | 4611 | { |
4702 | DRM_ERROR("Illegal register access in command stream\n"); | 4612 | DRM_ERROR("Illegal register access in command stream\n"); |
4703 | schedule_work(&adev->reset_work); | 4613 | gfx_v9_0_fault(adev, entry); |
4704 | return 0; | 4614 | return 0; |
4705 | } | 4615 | } |
4706 | 4616 | ||
@@ -4709,7 +4619,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev, | |||
4709 | struct amdgpu_iv_entry *entry) | 4619 | struct amdgpu_iv_entry *entry) |
4710 | { | 4620 | { |
4711 | DRM_ERROR("Illegal instruction in command stream\n"); | 4621 | DRM_ERROR("Illegal instruction in command stream\n"); |
4712 | schedule_work(&adev->reset_work); | 4622 | gfx_v9_0_fault(adev, entry); |
4713 | return 0; | 4623 | return 0; |
4714 | } | 4624 | } |
4715 | 4625 | ||
@@ -4836,10 +4746,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { | |||
4836 | 2 + /* gfx_v9_0_ring_emit_vm_flush */ | 4746 | 2 + /* gfx_v9_0_ring_emit_vm_flush */ |
4837 | 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */ | 4747 | 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */ |
4838 | .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */ | 4748 | .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */ |
4839 | .emit_ib = gfx_v9_0_ring_emit_ib_compute, | ||
4840 | .emit_fence = gfx_v9_0_ring_emit_fence_kiq, | 4749 | .emit_fence = gfx_v9_0_ring_emit_fence_kiq, |
4841 | .test_ring = gfx_v9_0_ring_test_ring, | 4750 | .test_ring = gfx_v9_0_ring_test_ring, |
4842 | .test_ib = gfx_v9_0_ring_test_ib, | ||
4843 | .insert_nop = amdgpu_ring_insert_nop, | 4751 | .insert_nop = amdgpu_ring_insert_nop, |
4844 | .pad_ib = amdgpu_ring_generic_pad_ib, | 4752 | .pad_ib = amdgpu_ring_generic_pad_ib, |
4845 | .emit_rreg = gfx_v9_0_ring_emit_rreg, | 4753 | .emit_rreg = gfx_v9_0_ring_emit_rreg, |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index ceb7847b504f..f5edddf3b29d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | |||
@@ -35,20 +35,25 @@ u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev) | |||
35 | return (u64)RREG32_SOC15(GC, 0, mmMC_VM_FB_OFFSET) << 24; | 35 | return (u64)RREG32_SOC15(GC, 0, mmMC_VM_FB_OFFSET) << 24; |
36 | } | 36 | } |
37 | 37 | ||
38 | static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev) | 38 | void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, |
39 | uint64_t page_table_base) | ||
39 | { | 40 | { |
40 | uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo); | 41 | /* two registers distance between mmVM_CONTEXT0_* to mmVM_CONTEXT1_* */ |
42 | int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 | ||
43 | - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; | ||
41 | 44 | ||
42 | WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, | 45 | WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, |
43 | lower_32_bits(value)); | 46 | offset * vmid, lower_32_bits(page_table_base)); |
44 | 47 | ||
45 | WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, | 48 | WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, |
46 | upper_32_bits(value)); | 49 | offset * vmid, upper_32_bits(page_table_base)); |
47 | } | 50 | } |
48 | 51 | ||
49 | static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev) | 52 | static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev) |
50 | { | 53 | { |
51 | gfxhub_v1_0_init_gart_pt_regs(adev); | 54 | uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); |
55 | |||
56 | gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base); | ||
52 | 57 | ||
53 | WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, | 58 | WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, |
54 | (u32)(adev->gmc.gart_start >> 12)); | 59 | (u32)(adev->gmc.gart_start >> 12)); |
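[annotation] gfxhub_v1_0_init_gart_pt_regs() generalizes into the exported gfxhub_v1_0_setup_vm_pt_regs(), exploiting the fixed register stride between VM contexts so any VMID's page-table base can be programmed; the GART path is now just the vmid 0 caller. A hedged usage sketch (process_root_bo and the per-process wiring are hypothetical):

    /* hypothetical caller: point GPUVM context @vmid at a process root
     * page directory; amdgpu_gmc_pd_addr() is the same helper the GART
     * path above uses */
    u64 pt_base = amdgpu_gmc_pd_addr(process_root_bo);

    gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, pt_base);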
@@ -72,7 +77,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) | |||
72 | 77 | ||
73 | /* Program the system aperture low logical page number. */ | 78 | /* Program the system aperture low logical page number. */ |
74 | WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, | 79 | WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, |
75 | min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18); | 80 | min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); |
76 | 81 | ||
77 | if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8) | 82 | if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8) |
78 | /* | 83 | /* |
@@ -82,11 +87,11 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) | |||
82 | * to get rid of the VM fault and hardware hang. | 87 | * to get rid of the VM fault and hardware hang. |
83 | */ | 88 | */ |
84 | WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, | 89 | WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, |
85 | max((adev->gmc.vram_end >> 18) + 0x1, | 90 | max((adev->gmc.fb_end >> 18) + 0x1, |
86 | adev->gmc.agp_end >> 18)); | 91 | adev->gmc.agp_end >> 18)); |
87 | else | 92 | else |
88 | WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, | 93 | WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, |
89 | max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18); | 94 | max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); |
90 | 95 | ||
91 | /* Set default page address. */ | 96 | /* Set default page address. */ |
92 | value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start | 97 | value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h index 206e29cad753..92d3a70cd9b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h | |||
@@ -30,5 +30,7 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, | |||
30 | bool value); | 30 | bool value); |
31 | void gfxhub_v1_0_init(struct amdgpu_device *adev); | 31 | void gfxhub_v1_0_init(struct amdgpu_device *adev); |
32 | u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev); | 32 | u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev); |
33 | void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, | ||
34 | uint64_t page_table_base); | ||
33 | 35 | ||
34 | #endif | 36 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index e1c2b4e9c7b2..2821d1d846e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | |||
@@ -358,7 +358,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev) | |||
358 | return 0; | 358 | return 0; |
359 | } | 359 | } |
360 | 360 | ||
361 | static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid) | 361 | static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, |
362 | uint32_t vmid, uint32_t flush_type) | ||
362 | { | 363 | { |
363 | WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); | 364 | WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); |
364 | } | 365 | } |
@@ -580,7 +581,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) | |||
580 | else | 581 | else |
581 | gmc_v6_0_set_fault_enable_default(adev, true); | 582 | gmc_v6_0_set_fault_enable_default(adev, true); |
582 | 583 | ||
583 | gmc_v6_0_flush_gpu_tlb(adev, 0); | 584 | gmc_v6_0_flush_gpu_tlb(adev, 0, 0); |
584 | dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", | 585 | dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", |
585 | (unsigned)(adev->gmc.gart_size >> 20), | 586 | (unsigned)(adev->gmc.gart_size >> 20), |
586 | (unsigned long long)table_addr); | 587 | (unsigned long long)table_addr); |
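
All the gmc flush callbacks gain a flush_type argument in this series; on pre-gfx9 parts such as gmc v6 the legacy per-VMID invalidate has no flush types, so the argument is accepted and ignored. A minimal model of that contract, with all names as stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    /* Every gmc generation now takes a flush_type, but older parts only
     * support the legacy per-VMID invalidate and simply ignore it. */
    typedef void (*flush_gpu_tlb_t)(uint32_t vmid, uint32_t flush_type);

    static uint32_t vm_invalidate_request; /* stand-in for mmVM_INVALIDATE_REQUEST */

    static void legacy_flush_gpu_tlb(uint32_t vmid, uint32_t flush_type)
    {
        (void)flush_type;                  /* no flush types before gfx9 */
        vm_invalidate_request = 1u << vmid; /* bits 0-15 select VM contexts 0-15 */
    }

    int main(void)
    {
        flush_gpu_tlb_t flush = legacy_flush_gpu_tlb;

        flush(0, 0); /* callers pass type 0 for a plain legacy flush */
        printf("req=0x%08x\n", vm_invalidate_request);
        return 0;
    }
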
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 910c4ce19cb3..761dcfb2fec0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | |||
@@ -430,7 +430,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev) | |||
430 | * | 430 | * |
431 | * Flush the TLB for the requested page table (CIK). | 431 | * Flush the TLB for the requested page table (CIK). |
432 | */ | 432 | */ |
433 | static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid) | 433 | static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, |
434 | uint32_t vmid, uint32_t flush_type) | ||
434 | { | 435 | { |
435 | /* bits 0-15 are the VM contexts 0-15 */ | 436 | /* bits 0-15 are the VM contexts 0-15 */ |
436 | WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); | 437 | WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); |
@@ -698,7 +699,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) | |||
698 | WREG32(mmCHUB_CONTROL, tmp); | 699 | WREG32(mmCHUB_CONTROL, tmp); |
699 | } | 700 | } |
700 | 701 | ||
701 | gmc_v7_0_flush_gpu_tlb(adev, 0); | 702 | gmc_v7_0_flush_gpu_tlb(adev, 0, 0); |
702 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", | 703 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", |
703 | (unsigned)(adev->gmc.gart_size >> 20), | 704 | (unsigned)(adev->gmc.gart_size >> 20), |
704 | (unsigned long long)table_addr); | 705 | (unsigned long long)table_addr); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 1d3265c97b70..531aaf377592 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | |||
@@ -611,7 +611,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) | |||
611 | * Flush the TLB for the requested page table (CIK). | 611 | * Flush the TLB for the requested page table (CIK). |
612 | */ | 612 | */ |
613 | static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, | 613 | static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev, |
614 | uint32_t vmid) | 614 | uint32_t vmid, uint32_t flush_type) |
615 | { | 615 | { |
616 | /* bits 0-15 are the VM contexts 0-15 */ | 616 | /* bits 0-15 are the VM contexts 0-15 */ |
617 | WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); | 617 | WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); |
@@ -920,7 +920,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) | |||
920 | else | 920 | else |
921 | gmc_v8_0_set_fault_enable_default(adev, true); | 921 | gmc_v8_0_set_fault_enable_default(adev, true); |
922 | 922 | ||
923 | gmc_v8_0_flush_gpu_tlb(adev, 0); | 923 | gmc_v8_0_flush_gpu_tlb(adev, 0, 0); |
924 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", | 924 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", |
925 | (unsigned)(adev->gmc.gart_size >> 20), | 925 | (unsigned)(adev->gmc.gart_size >> 20), |
926 | (unsigned long long)table_addr); | 926 | (unsigned long long)table_addr); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index f35d7a554ad5..811231e4ec53 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | |||
@@ -293,14 +293,14 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev) | |||
293 | adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs; | 293 | adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs; |
294 | } | 294 | } |
295 | 295 | ||
296 | static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid) | 296 | static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, |
297 | uint32_t flush_type) | ||
297 | { | 298 | { |
298 | u32 req = 0; | 299 | u32 req = 0; |
299 | 300 | ||
300 | /* invalidate using legacy mode on vmid*/ | ||
301 | req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, | 301 | req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, |
302 | PER_VMID_INVALIDATE_REQ, 1 << vmid); | 302 | PER_VMID_INVALIDATE_REQ, 1 << vmid); |
303 | req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0); | 303 | req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type); |
304 | req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); | 304 | req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1); |
305 | req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); | 305 | req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1); |
306 | req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); | 306 | req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1); |
@@ -312,48 +312,6 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid) | |||
312 | return req; | 312 | return req; |
313 | } | 313 | } |
314 | 314 | ||
315 | static signed long amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev, | ||
316 | uint32_t reg0, uint32_t reg1, | ||
317 | uint32_t ref, uint32_t mask) | ||
318 | { | ||
319 | signed long r, cnt = 0; | ||
320 | unsigned long flags; | ||
321 | uint32_t seq; | ||
322 | struct amdgpu_kiq *kiq = &adev->gfx.kiq; | ||
323 | struct amdgpu_ring *ring = &kiq->ring; | ||
324 | |||
325 | spin_lock_irqsave(&kiq->ring_lock, flags); | ||
326 | |||
327 | amdgpu_ring_alloc(ring, 32); | ||
328 | amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1, | ||
329 | ref, mask); | ||
330 | amdgpu_fence_emit_polling(ring, &seq); | ||
331 | amdgpu_ring_commit(ring); | ||
332 | spin_unlock_irqrestore(&kiq->ring_lock, flags); | ||
333 | |||
334 | r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); | ||
335 | |||
336 | /* don't wait anymore for IRQ context */ | ||
337 | if (r < 1 && in_interrupt()) | ||
338 | goto failed_kiq; | ||
339 | |||
340 | might_sleep(); | ||
341 | |||
342 | while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { | ||
343 | msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); | ||
344 | r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); | ||
345 | } | ||
346 | |||
347 | if (cnt > MAX_KIQ_REG_TRY) | ||
348 | goto failed_kiq; | ||
349 | |||
350 | return 0; | ||
351 | |||
352 | failed_kiq: | ||
353 | pr_err("failed to invalidate tlb with kiq\n"); | ||
354 | return r; | ||
355 | } | ||
356 | |||
357 | /* | 315 | /* |
358 | * GART | 316 | * GART |
359 | * VMID 0 is the physical GPU addresses as used by the kernel. | 317 | * VMID 0 is the physical GPU addresses as used by the kernel. |
@@ -362,64 +320,47 @@ failed_kiq: | |||
362 | */ | 320 | */ |
363 | 321 | ||
364 | /** | 322 | /** |
365 | * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback | 323 | * gmc_v9_0_flush_gpu_tlb - tlb flush with a given flush type |
366 | * | 324 | * |
367 | * @adev: amdgpu_device pointer | 325 | * @adev: amdgpu_device pointer |
368 | * @vmid: vm instance to flush | 326 | * @vmid: vm instance to flush |
327 | * @flush_type: the flush type | ||
369 | * | 328 | * |
370 | * Flush the TLB for the requested page table. | 329 | * Flush the TLB for the requested page table using a given flush type. |
371 | */ | 330 | */ |
372 | static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, | 331 | static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, |
373 | uint32_t vmid) | 332 | uint32_t vmid, uint32_t flush_type) |
374 | { | 333 | { |
375 | /* Use register 17 for GART */ | ||
376 | const unsigned eng = 17; | 334 | const unsigned eng = 17; |
377 | unsigned i, j; | 335 | unsigned i, j; |
378 | int r; | ||
379 | 336 | ||
380 | for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { | 337 | for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) { |
381 | struct amdgpu_vmhub *hub = &adev->vmhub[i]; | 338 | struct amdgpu_vmhub *hub = &adev->vmhub[i]; |
382 | u32 tmp = gmc_v9_0_get_invalidate_req(vmid); | 339 | u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type); |
383 | |||
384 | if (adev->gfx.kiq.ring.ready && | ||
385 | (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && | ||
386 | !adev->in_gpu_reset) { | ||
387 | r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng, | ||
388 | hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid); | ||
389 | if (!r) | ||
390 | continue; | ||
391 | } | ||
392 | 340 | ||
393 | spin_lock(&adev->gmc.invalidate_lock); | 341 | if (i == AMDGPU_GFXHUB && !adev->in_gpu_reset && |
342 | adev->gfx.kiq.ring.sched.ready && | ||
343 | (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) { | ||
344 | uint32_t req = hub->vm_inv_eng0_req + eng; | ||
345 | uint32_t ack = hub->vm_inv_eng0_ack + eng; | ||
394 | 346 | ||
395 | WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); | 347 | amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp, |
396 | 348 | 1 << vmid); | |
397 | /* Busy wait for ACK.*/ | ||
398 | for (j = 0; j < 100; j++) { | ||
399 | tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng); | ||
400 | tmp &= 1 << vmid; | ||
401 | if (tmp) | ||
402 | break; | ||
403 | cpu_relax(); | ||
404 | } | ||
405 | if (j < 100) { | ||
406 | spin_unlock(&adev->gmc.invalidate_lock); | ||
407 | continue; | 349 | continue; |
408 | } | 350 | } |
409 | 351 | ||
410 | /* Wait for ACK with a delay.*/ | 352 | spin_lock(&adev->gmc.invalidate_lock); |
353 | WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); | ||
411 | for (j = 0; j < adev->usec_timeout; j++) { | 354 | for (j = 0; j < adev->usec_timeout; j++) { |
412 | tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng); | 355 | tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng); |
413 | tmp &= 1 << vmid; | 356 | if (tmp & (1 << vmid)) |
414 | if (tmp) | ||
415 | break; | 357 | break; |
416 | udelay(1); | 358 | udelay(1); |
417 | } | 359 | } |
418 | if (j < adev->usec_timeout) { | ||
419 | spin_unlock(&adev->gmc.invalidate_lock); | ||
420 | continue; | ||
421 | } | ||
422 | spin_unlock(&adev->gmc.invalidate_lock); | 360 | spin_unlock(&adev->gmc.invalidate_lock); |
361 | if (j < adev->usec_timeout) | ||
362 | continue; | ||
363 | |||
423 | DRM_ERROR("Timeout waiting for VM flush ACK!\n"); | 364 | DRM_ERROR("Timeout waiting for VM flush ACK!\n"); |
424 | } | 365 | } |
425 | } | 366 | } |
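
The rewritten gmc v9 path above collapses the old two-stage ACK wait (fast spin, then udelay loop) into a single bounded poll. A standalone model of that wait, with read_ack() and udelay() as stand-ins for the register read and delay primitives:

    #include <stdint.h>
    #include <stdbool.h>

    static uint32_t fake_ack;
    static uint32_t read_ack(void) { return fake_ack; }
    /* Pretend the hardware acks after one delay cycle. */
    static void udelay(unsigned us) { (void)us; fake_ack = 1u << 5; }

    /* Write the invalidate request, then poll the ack register until the
     * per-VMID bit appears or the timeout expires. */
    static bool wait_for_flush_ack(uint32_t vmid, unsigned usec_timeout)
    {
        unsigned j;

        for (j = 0; j < usec_timeout; j++) {
            if (read_ack() & (1u << vmid))
                return true;  /* flush completed */
            udelay(1);
        }
        return false;         /* caller reports "Timeout waiting for VM flush ACK!" */
    }

    int main(void) { return wait_for_flush_ack(5, 100) ? 0 : 1; }
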
@@ -429,7 +370,7 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, | |||
429 | { | 370 | { |
430 | struct amdgpu_device *adev = ring->adev; | 371 | struct amdgpu_device *adev = ring->adev; |
431 | struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub]; | 372 | struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub]; |
432 | uint32_t req = gmc_v9_0_get_invalidate_req(vmid); | 373 | uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0); |
433 | unsigned eng = ring->vm_inv_eng; | 374 | unsigned eng = ring->vm_inv_eng; |
434 | 375 | ||
435 | amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid), | 376 | amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid), |
@@ -739,9 +680,8 @@ static int gmc_v9_0_late_init(void *handle) | |||
739 | unsigned vmhub = ring->funcs->vmhub; | 680 | unsigned vmhub = ring->funcs->vmhub; |
740 | 681 | ||
741 | ring->vm_inv_eng = vm_inv_eng[vmhub]++; | 682 | ring->vm_inv_eng = vm_inv_eng[vmhub]++; |
742 | dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n", | 683 | dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", |
743 | ring->idx, ring->name, ring->vm_inv_eng, | 684 | ring->name, ring->vm_inv_eng, ring->funcs->vmhub); |
744 | ring->funcs->vmhub); | ||
745 | } | 685 | } |
746 | 686 | ||
747 | /* Engine 16 is used for KFD and 17 for GART flushes */ | 687 | /* Engine 16 is used for KFD and 17 for GART flushes */ |
@@ -1122,7 +1062,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) | |||
1122 | 1062 | ||
1123 | gfxhub_v1_0_set_fault_enable_default(adev, value); | 1063 | gfxhub_v1_0_set_fault_enable_default(adev, value); |
1124 | mmhub_v1_0_set_fault_enable_default(adev, value); | 1064 | mmhub_v1_0_set_fault_enable_default(adev, value); |
1125 | gmc_v9_0_flush_gpu_tlb(adev, 0); | 1065 | gmc_v9_0_flush_gpu_tlb(adev, 0, 0); |
1126 | 1066 | ||
1127 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", | 1067 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", |
1128 | (unsigned)(adev->gmc.gart_size >> 20), | 1068 | (unsigned)(adev->gmc.gart_size >> 20), |
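
For reference, a sketch of how a flush type could be folded into the invalidate request word, in the spirit of gmc_v9_0_get_invalidate_req() above. The shift positions below are invented for illustration, not the real VM_INVALIDATE_ENG0_REQ layout:

    #include <stdint.h>
    #include <stdio.h>

    #define PER_VMID_REQ_SHIFT  0   /* hypothetical field position */
    #define FLUSH_TYPE_SHIFT    16  /* hypothetical field position */

    static uint32_t get_invalidate_req(unsigned vmid, uint32_t flush_type)
    {
        uint32_t req = 0;

        req |= (1u << vmid) << PER_VMID_REQ_SHIFT;     /* per-VMID invalidate */
        req |= (flush_type & 0x7) << FLUSH_TYPE_SHIFT; /* 0 = legacy flush */
        return req;
    }

    int main(void)
    {
        printf("req=0x%08x\n", get_invalidate_req(3, 0));
        return 0;
    }
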
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index d0e478f43443..0c9a2c03504e 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c | |||
@@ -508,19 +508,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable) | |||
508 | pi->caps_db_ramping || | 508 | pi->caps_db_ramping || |
509 | pi->caps_td_ramping || | 509 | pi->caps_td_ramping || |
510 | pi->caps_tcp_ramping) { | 510 | pi->caps_tcp_ramping) { |
511 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 511 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
512 | 512 | ||
513 | if (enable) { | 513 | if (enable) { |
514 | ret = kv_program_pt_config_registers(adev, didt_config_kv); | 514 | ret = kv_program_pt_config_registers(adev, didt_config_kv); |
515 | if (ret) { | 515 | if (ret) { |
516 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 516 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
517 | return ret; | 517 | return ret; |
518 | } | 518 | } |
519 | } | 519 | } |
520 | 520 | ||
521 | kv_do_enable_didt(adev, enable); | 521 | kv_do_enable_didt(adev, enable); |
522 | 522 | ||
523 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 523 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
524 | } | 524 | } |
525 | 525 | ||
526 | return 0; | 526 | return 0; |
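
kv_dpm now reaches RLC safe mode through the amdgpu_gfx_rlc_* wrappers instead of dereferencing the function pointers directly. The invariant visible in the error path above is that safe mode is always exited; a small standalone model, with all helpers as stand-ins:

    #include <stdio.h>

    static void rlc_enter_safe_mode(void) { puts("enter safe mode"); }
    static void rlc_exit_safe_mode(void)  { puts("exit safe mode"); }
    static int program_didt_registers(void) { return 0; /* pretend success */ }

    static int enable_didt(int enable)
    {
        int ret = 0;

        rlc_enter_safe_mode();
        if (enable) {
            ret = program_didt_registers();
            if (ret) {
                rlc_exit_safe_mode(); /* never leave safe mode held on error */
                return ret;
            }
        }
        rlc_exit_safe_mode();
        return ret;
    }

    int main(void) { return enable_didt(1); }
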
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index fd23ba1226a5..d0d966d6080a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | |||
@@ -52,20 +52,25 @@ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev) | |||
52 | return base; | 52 | return base; |
53 | } | 53 | } |
54 | 54 | ||
55 | static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev) | 55 | void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, |
56 | uint64_t page_table_base) | ||
56 | { | 57 | { |
57 | uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo); | 58 | /* two registers distance between mmVM_CONTEXT0_* to mmVM_CONTEXT1_* */ |
59 | int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 | ||
60 | - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; | ||
58 | 61 | ||
59 | WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, | 62 | WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, |
60 | lower_32_bits(value)); | 63 | offset * vmid, lower_32_bits(page_table_base)); |
61 | 64 | ||
62 | WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, | 65 | WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, |
63 | upper_32_bits(value)); | 66 | offset * vmid, upper_32_bits(page_table_base)); |
64 | } | 67 | } |
65 | 68 | ||
66 | static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev) | 69 | static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev) |
67 | { | 70 | { |
68 | mmhub_v1_0_init_gart_pt_regs(adev); | 71 | uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); |
72 | |||
73 | mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base); | ||
69 | 74 | ||
70 | WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, | 75 | WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, |
71 | (u32)(adev->gmc.gart_start >> 12)); | 76 | (u32)(adev->gmc.gart_start >> 12)); |
@@ -90,7 +95,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) | |||
90 | 95 | ||
91 | /* Program the system aperture low logical page number. */ | 96 | /* Program the system aperture low logical page number. */ |
92 | WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, | 97 | WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, |
93 | min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18); | 98 | min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); |
94 | 99 | ||
95 | if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8) | 100 | if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8) |
96 | /* | 101 | /* |
@@ -100,11 +105,11 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) | |||
100 | * to get rid of the VM fault and hardware hang. | 105 | * to get rid of the VM fault and hardware hang. |
101 | */ | 106 | */ |
102 | WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, | 107 | WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, |
103 | max((adev->gmc.vram_end >> 18) + 0x1, | 108 | max((adev->gmc.fb_end >> 18) + 0x1, |
104 | adev->gmc.agp_end >> 18)); | 109 | adev->gmc.agp_end >> 18)); |
105 | else | 110 | else |
106 | WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, | 111 | WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, |
107 | max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18); | 112 | max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); |
108 | 113 | ||
109 | /* Set default page address. */ | 114 | /* Set default page address. */ |
110 | value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + | 115 | value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + |
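
mmhub_v1_0_setup_vm_pt_regs() generalizes the old GART-only helper to any VMID by scaling the context-to-context register stride. A standalone sketch of that addressing, with invented register numbers and the HI register modeled as the adjacent word:

    #include <stdint.h>
    #include <stdio.h>

    #define CTX0_PT_BASE_LO 0x100 /* hypothetical mmVM_CONTEXT0_..._LO32 */
    #define CTX1_PT_BASE_LO 0x102 /* hypothetical mmVM_CONTEXT1_..._LO32 */

    static void write_reg(uint32_t reg, uint32_t val)
    {
        printf("WREG32(0x%03x) = 0x%08x\n", reg, val);
    }

    /* The stride between two contexts' base registers, times the VMID,
     * relocates the same write from context 0 to context N. */
    static void setup_vm_pt_regs(uint32_t vmid, uint64_t page_table_base)
    {
        uint32_t offset = (CTX1_PT_BASE_LO - CTX0_PT_BASE_LO) * vmid;

        write_reg(CTX0_PT_BASE_LO + offset, (uint32_t)page_table_base);
        write_reg(CTX0_PT_BASE_LO + offset + 1, (uint32_t)(page_table_base >> 32));
    }

    int main(void)
    {
        setup_vm_pt_regs(0, 0x123456789000ull); /* GART still programs VMID 0 */
        setup_vm_pt_regs(4, 0xabcdef012000ull);
        return 0;
    }
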
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h index bef3d0c0c117..0de0fdf98c00 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h | |||
@@ -34,5 +34,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, | |||
34 | void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags); | 34 | void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags); |
35 | void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, | 35 | void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, |
36 | bool enable); | 36 | bool enable); |
37 | void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, | ||
38 | uint64_t page_table_base); | ||
37 | 39 | ||
38 | #endif | 40 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 3f3fac2d50cd..e5dd052d9e06 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include "nbio/nbio_7_4_offset.h" | 34 | #include "nbio/nbio_7_4_offset.h" |
35 | 35 | ||
36 | MODULE_FIRMWARE("amdgpu/vega20_sos.bin"); | 36 | MODULE_FIRMWARE("amdgpu/vega20_sos.bin"); |
37 | MODULE_FIRMWARE("amdgpu/vega20_ta.bin"); | ||
37 | 38 | ||
38 | /* address block */ | 39 | /* address block */ |
39 | #define smnMP1_FIRMWARE_FLAGS 0x3010024 | 40 | #define smnMP1_FIRMWARE_FLAGS 0x3010024 |
@@ -98,7 +99,8 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) | |||
98 | const char *chip_name; | 99 | const char *chip_name; |
99 | char fw_name[30]; | 100 | char fw_name[30]; |
100 | int err = 0; | 101 | int err = 0; |
101 | const struct psp_firmware_header_v1_0 *hdr; | 102 | const struct psp_firmware_header_v1_0 *sos_hdr; |
103 | const struct ta_firmware_header_v1_0 *ta_hdr; | ||
102 | 104 | ||
103 | DRM_DEBUG("\n"); | 105 | DRM_DEBUG("\n"); |
104 | 106 | ||
@@ -119,16 +121,32 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) | |||
119 | if (err) | 121 | if (err) |
120 | goto out; | 122 | goto out; |
121 | 123 | ||
122 | hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; | 124 | sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; |
123 | adev->psp.sos_fw_version = le32_to_cpu(hdr->header.ucode_version); | 125 | adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version); |
124 | adev->psp.sos_feature_version = le32_to_cpu(hdr->ucode_feature_version); | 126 | adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version); |
125 | adev->psp.sos_bin_size = le32_to_cpu(hdr->sos_size_bytes); | 127 | adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes); |
126 | adev->psp.sys_bin_size = le32_to_cpu(hdr->header.ucode_size_bytes) - | 128 | adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->header.ucode_size_bytes) - |
127 | le32_to_cpu(hdr->sos_size_bytes); | 129 | le32_to_cpu(sos_hdr->sos_size_bytes); |
128 | adev->psp.sys_start_addr = (uint8_t *)hdr + | 130 | adev->psp.sys_start_addr = (uint8_t *)sos_hdr + |
129 | le32_to_cpu(hdr->header.ucode_array_offset_bytes); | 131 | le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); |
130 | adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr + | 132 | adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr + |
131 | le32_to_cpu(hdr->sos_offset_bytes); | 133 | le32_to_cpu(sos_hdr->sos_offset_bytes); |
134 | |||
135 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); | ||
136 | err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); | ||
137 | if (err) | ||
138 | goto out; | ||
139 | |||
140 | err = amdgpu_ucode_validate(adev->psp.ta_fw); | ||
141 | if (err) | ||
142 | goto out; | ||
143 | |||
144 | ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; | ||
145 | adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); | ||
146 | adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes); | ||
147 | adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr + | ||
148 | le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); | ||
149 | |||
132 | return 0; | 150 | return 0; |
133 | out: | 151 | out: |
134 | if (err) { | 152 | if (err) { |
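
The new TA ucode path requests <chip>_ta.bin and pulls the XGMI TA fields out of its header. A sketch of that style of little-endian header parse; the struct layout here is a guess for illustration, not the real ta_firmware_header_v1_0:

    #include <stdint.h>
    #include <stdio.h>

    struct ta_hdr {
        uint32_t ucode_array_offset_bytes; /* le32 in the real header */
        uint32_t ta_xgmi_ucode_version;    /* le32 */
        uint32_t ta_xgmi_size_bytes;       /* le32 */
    };

    /* Portable le32_to_cpu-style read from a raw firmware image. */
    static uint32_t le32(const uint8_t *p)
    {
        return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    int main(void)
    {
        uint8_t fw[64] = { 12, 0, 0, 0, 2, 0, 0, 0, 16, 0, 0, 0 };
        struct ta_hdr hdr = {
            .ucode_array_offset_bytes = le32(fw + 0),
            .ta_xgmi_ucode_version    = le32(fw + 4),
            .ta_xgmi_size_bytes       = le32(fw + 8),
        };
        const uint8_t *start = fw + hdr.ucode_array_offset_bytes;

        printf("xgmi ta v%u, %u bytes at %p\n", hdr.ta_xgmi_ucode_version,
               hdr.ta_xgmi_size_bytes, (const void *)start);
        return 0;
    }
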
@@ -167,7 +185,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp) | |||
167 | /* Copy PSP System Driver binary to memory */ | 185 | /* Copy PSP System Driver binary to memory */ |
168 | memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size); | 186 | memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size); |
169 | 187 | ||
170 | /* Provide the sys driver to bootrom */ | 188 | /* Provide the sys driver to bootloader */ |
171 | WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, | 189 | WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, |
172 | (uint32_t)(psp->fw_pri_mc_addr >> 20)); | 190 | (uint32_t)(psp->fw_pri_mc_addr >> 20)); |
173 | psp_gfxdrv_command_reg = 1 << 16; | 191 | psp_gfxdrv_command_reg = 1 << 16; |
@@ -208,7 +226,7 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp) | |||
208 | /* Copy Secure OS binary to PSP memory */ | 226 | /* Copy Secure OS binary to PSP memory */ |
209 | memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size); | 227 | memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size); |
210 | 228 | ||
211 | /* Provide the PSP secure OS to bootrom */ | 229 | /* Provide the PSP secure OS to bootloader */ |
212 | WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, | 230 | WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, |
213 | (uint32_t)(psp->fw_pri_mc_addr >> 20)); | 231 | (uint32_t)(psp->fw_pri_mc_addr >> 20)); |
214 | psp_gfxdrv_command_reg = 2 << 16; | 232 | psp_gfxdrv_command_reg = 2 << 16; |
@@ -552,24 +570,110 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp) | |||
552 | static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp, | 570 | static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp, |
553 | int number_devices, struct psp_xgmi_topology_info *topology) | 571 | int number_devices, struct psp_xgmi_topology_info *topology) |
554 | { | 572 | { |
573 | struct ta_xgmi_shared_memory *xgmi_cmd; | ||
574 | struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; | ||
575 | struct ta_xgmi_cmd_get_topology_info_output *topology_info_output; | ||
576 | int i; | ||
577 | int ret; | ||
578 | |||
579 | if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) | ||
580 | return -EINVAL; | ||
581 | |||
582 | xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf; | ||
583 | memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); | ||
584 | |||
585 | /* Fill in the shared memory with topology information as input */ | ||
586 | topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; | ||
587 | xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO; | ||
588 | topology_info_input->num_nodes = number_devices; | ||
589 | |||
590 | for (i = 0; i < topology_info_input->num_nodes; i++) { | ||
591 | topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; | ||
592 | topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; | ||
593 | topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled; | ||
594 | topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; | ||
595 | } | ||
596 | |||
597 | /* Invoke xgmi ta to get the topology information */ | ||
598 | ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO); | ||
599 | if (ret) | ||
600 | return ret; | ||
601 | |||
602 | /* Read the output topology information from the shared memory */ | ||
603 | topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info; | ||
604 | topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes; | ||
605 | for (i = 0; i < topology->num_nodes; i++) { | ||
606 | topology->nodes[i].node_id = topology_info_output->nodes[i].node_id; | ||
607 | topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops; | ||
608 | topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled; | ||
609 | topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine; | ||
610 | } | ||
611 | |||
555 | return 0; | 612 | return 0; |
556 | } | 613 | } |
557 | 614 | ||
558 | static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp, | 615 | static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp, |
559 | int number_devices, struct psp_xgmi_topology_info *topology) | 616 | int number_devices, struct psp_xgmi_topology_info *topology) |
560 | { | 617 | { |
561 | return 0; | 618 | struct ta_xgmi_shared_memory *xgmi_cmd; |
619 | struct ta_xgmi_cmd_get_topology_info_input *topology_info_input; | ||
620 | int i; | ||
621 | |||
622 | if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES) | ||
623 | return -EINVAL; | ||
624 | |||
625 | xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf; | ||
626 | memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); | ||
627 | |||
628 | topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info; | ||
629 | xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO; | ||
630 | topology_info_input->num_nodes = number_devices; | ||
631 | |||
632 | for (i = 0; i < topology_info_input->num_nodes; i++) { | ||
633 | topology_info_input->nodes[i].node_id = topology->nodes[i].node_id; | ||
634 | topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops; | ||
635 | topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled; | ||
636 | topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine; | ||
637 | } | ||
638 | |||
639 | /* Invoke xgmi ta to set topology information */ | ||
640 | return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO); | ||
562 | } | 641 | } |
563 | 642 | ||
564 | static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp) | 643 | static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp) |
565 | { | 644 | { |
566 | u64 hive_id = 0; | 645 | struct ta_xgmi_shared_memory *xgmi_cmd; |
646 | int ret; | ||
647 | |||
648 | xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf; | ||
649 | memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); | ||
650 | |||
651 | xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID; | ||
652 | |||
653 | /* Invoke xgmi ta to get hive id */ | ||
654 | ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); | ||
655 | if (ret) | ||
656 | return 0; | ||
657 | else | ||
658 | return xgmi_cmd->xgmi_out_message.get_hive_id.hive_id; | ||
659 | } | ||
660 | |||
661 | static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp) | ||
662 | { | ||
663 | struct ta_xgmi_shared_memory *xgmi_cmd; | ||
664 | int ret; | ||
665 | |||
666 | xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf; | ||
667 | memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory)); | ||
567 | 668 | ||
568 | /* Remove me when we can get correct hive_id through PSP */ | 669 | xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID; |
569 | if (psp->adev->gmc.xgmi.num_physical_nodes) | ||
570 | hive_id = 0x123456789abcdef; | ||
571 | 670 | ||
572 | return hive_id; | 671 | /* Invoke xgmi ta to get the node id */ |
672 | ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id); | ||
673 | if (ret) | ||
674 | return 0; | ||
675 | else | ||
676 | return xgmi_cmd->xgmi_out_message.get_node_id.node_id; | ||
573 | } | 677 | } |
574 | 678 | ||
575 | static const struct psp_funcs psp_v11_0_funcs = { | 679 | static const struct psp_funcs psp_v11_0_funcs = { |
@@ -587,6 +691,7 @@ static const struct psp_funcs psp_v11_0_funcs = { | |||
587 | .xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info, | 691 | .xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info, |
588 | .xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info, | 692 | .xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info, |
589 | .xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id, | 693 | .xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id, |
694 | .xgmi_get_node_id = psp_v11_0_xgmi_get_node_id, | ||
590 | }; | 695 | }; |
591 | 696 | ||
592 | void psp_v11_0_set_psp_funcs(struct psp_context *psp) | 697 | void psp_v11_0_set_psp_funcs(struct psp_context *psp) |
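
All four XGMI calls added above follow the same shared-buffer protocol: clear the buffer, set cmd_id plus any input, invoke the TA, then read the output union, falling back to 0 on failure. A minimal model with stand-in types for ta_xgmi_shared_memory and psp_xgmi_invoke():

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    enum { CMD_GET_HIVE_ID = 1, CMD_GET_NODE_ID = 2 };

    struct xgmi_shared {
        uint32_t cmd_id;
        union { uint64_t hive_id; uint64_t node_id; } out;
    };

    static int invoke(struct xgmi_shared *buf)
    {
        buf->out.hive_id = 0xdeadbeefull; /* pretend the TA answered */
        return 0;
    }

    static uint64_t get_hive_id(struct xgmi_shared *buf)
    {
        memset(buf, 0, sizeof(*buf));
        buf->cmd_id = CMD_GET_HIVE_ID;
        if (invoke(buf))
            return 0; /* 0 means "no hive", matching the driver's fallback */
        return buf->out.hive_id;
    }

    int main(void)
    {
        struct xgmi_shared buf;

        printf("hive id 0x%llx\n", (unsigned long long)get_hive_id(&buf));
        return 0;
    }
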
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index e1ebf770c303..9cea0bbe4525 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | |||
@@ -194,7 +194,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) | |||
194 | /* Copy PSP System Driver binary to memory */ | 194 | /* Copy PSP System Driver binary to memory */ |
195 | memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size); | 195 | memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size); |
196 | 196 | ||
197 | /* Provide the sys driver to bootrom */ | 197 | /* Provide the sys driver to bootloader */ |
198 | WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, | 198 | WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, |
199 | (uint32_t)(psp->fw_pri_mc_addr >> 20)); | 199 | (uint32_t)(psp->fw_pri_mc_addr >> 20)); |
200 | psp_gfxdrv_command_reg = 1 << 16; | 200 | psp_gfxdrv_command_reg = 1 << 16; |
@@ -254,7 +254,7 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) | |||
254 | /* Copy Secure OS binary to PSP memory */ | 254 | /* Copy Secure OS binary to PSP memory */ |
255 | memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size); | 255 | memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size); |
256 | 256 | ||
257 | /* Provide the PSP secure OS to bootrom */ | 257 | /* Provide the PSP secure OS to bootloader */ |
258 | WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, | 258 | WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, |
259 | (uint32_t)(psp->fw_pri_mc_addr >> 20)); | 259 | (uint32_t)(psp->fw_pri_mc_addr >> 20)); |
260 | psp_gfxdrv_command_reg = 2 << 16; | 260 | psp_gfxdrv_command_reg = 2 << 16; |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 2d4770e173dd..9f3cb2aec7c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | |||
@@ -225,7 +225,7 @@ static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring) | |||
225 | 225 | ||
226 | static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | 226 | static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) |
227 | { | 227 | { |
228 | struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); | 228 | struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); |
229 | int i; | 229 | int i; |
230 | 230 | ||
231 | for (i = 0; i < count; i++) | 231 | for (i = 0; i < count; i++) |
@@ -245,9 +245,12 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | |||
245 | * Schedule an IB in the DMA ring (VI). | 245 | * Schedule an IB in the DMA ring (VI). |
246 | */ | 246 | */ |
247 | static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, | 247 | static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, |
248 | struct amdgpu_job *job, | ||
248 | struct amdgpu_ib *ib, | 249 | struct amdgpu_ib *ib, |
249 | unsigned vmid, bool ctx_switch) | 250 | bool ctx_switch) |
250 | { | 251 | { |
252 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
253 | |||
251 | /* IB packet must end on a 8 DW boundary */ | 254 | /* IB packet must end on a 8 DW boundary */ |
252 | sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); | 255 | sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); |
253 | 256 | ||
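
The emit_ib hook now receives the job and derives the VMID itself. Assuming AMDGPU_JOB_GET_VMID behaves like the null-tolerant accessor below (direct IB submissions, such as the IB tests, have no job), a minimal model:

    #include <stdio.h>

    struct job { unsigned vmid; }; /* stand-in for struct amdgpu_job */

    /* Pull the VMID from the job, falling back to 0 when there is none. */
    #define JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)

    int main(void)
    {
        struct job j = { .vmid = 3 };

        printf("with job: %u\n", JOB_GET_VMID(&j));
        printf("no job:   %u\n", JOB_GET_VMID((struct job *)0));
        return 0;
    }
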
@@ -349,8 +352,8 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev) | |||
349 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); | 352 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); |
350 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | 353 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); |
351 | } | 354 | } |
352 | sdma0->ready = false; | 355 | sdma0->sched.ready = false; |
353 | sdma1->ready = false; | 356 | sdma1->sched.ready = false; |
354 | } | 357 | } |
355 | 358 | ||
356 | /** | 359 | /** |
@@ -471,17 +474,15 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) | |||
471 | /* enable DMA IBs */ | 474 | /* enable DMA IBs */ |
472 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | 475 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); |
473 | 476 | ||
474 | ring->ready = true; | 477 | ring->sched.ready = true; |
475 | } | 478 | } |
476 | 479 | ||
477 | sdma_v2_4_enable(adev, true); | 480 | sdma_v2_4_enable(adev, true); |
478 | for (i = 0; i < adev->sdma.num_instances; i++) { | 481 | for (i = 0; i < adev->sdma.num_instances; i++) { |
479 | ring = &adev->sdma.instance[i].ring; | 482 | ring = &adev->sdma.instance[i].ring; |
480 | r = amdgpu_ring_test_ring(ring); | 483 | r = amdgpu_ring_test_helper(ring); |
481 | if (r) { | 484 | if (r) |
482 | ring->ready = false; | ||
483 | return r; | 485 | return r; |
484 | } | ||
485 | 486 | ||
486 | if (adev->mman.buffer_funcs_ring == ring) | 487 | if (adev->mman.buffer_funcs_ring == ring) |
487 | amdgpu_ttm_set_buffer_funcs_status(adev, true); | 488 | amdgpu_ttm_set_buffer_funcs_status(adev, true); |
@@ -550,21 +551,16 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) | |||
550 | u64 gpu_addr; | 551 | u64 gpu_addr; |
551 | 552 | ||
552 | r = amdgpu_device_wb_get(adev, &index); | 553 | r = amdgpu_device_wb_get(adev, &index); |
553 | if (r) { | 554 | if (r) |
554 | dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); | ||
555 | return r; | 555 | return r; |
556 | } | ||
557 | 556 | ||
558 | gpu_addr = adev->wb.gpu_addr + (index * 4); | 557 | gpu_addr = adev->wb.gpu_addr + (index * 4); |
559 | tmp = 0xCAFEDEAD; | 558 | tmp = 0xCAFEDEAD; |
560 | adev->wb.wb[index] = cpu_to_le32(tmp); | 559 | adev->wb.wb[index] = cpu_to_le32(tmp); |
561 | 560 | ||
562 | r = amdgpu_ring_alloc(ring, 5); | 561 | r = amdgpu_ring_alloc(ring, 5); |
563 | if (r) { | 562 | if (r) |
564 | DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); | 563 | goto error_free_wb; |
565 | amdgpu_device_wb_free(adev, index); | ||
566 | return r; | ||
567 | } | ||
568 | 564 | ||
569 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | | 565 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | |
570 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); | 566 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); |
@@ -581,15 +577,11 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) | |||
581 | DRM_UDELAY(1); | 577 | DRM_UDELAY(1); |
582 | } | 578 | } |
583 | 579 | ||
584 | if (i < adev->usec_timeout) { | 580 | if (i >= adev->usec_timeout) |
585 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); | 581 | r = -ETIMEDOUT; |
586 | } else { | ||
587 | DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", | ||
588 | ring->idx, tmp); | ||
589 | r = -EINVAL; | ||
590 | } | ||
591 | amdgpu_device_wb_free(adev, index); | ||
592 | 582 | ||
583 | error_free_wb: | ||
584 | amdgpu_device_wb_free(adev, index); | ||
593 | return r; | 585 | return r; |
594 | } | 586 | } |
595 | 587 | ||
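
The ring-test cleanups above converge both failure paths on a single error_free_wb label so the writeback slot is always released and the error reporting moves to the callers. A standalone model of that structure, with all helpers as stand-ins:

    #include <stdio.h>

    static int wb_get(unsigned *index) { *index = 7; return 0; }
    static void wb_free(unsigned index) { (void)index; }
    static int ring_alloc(void) { return 0; }
    static int poll_result(void) { return 0; /* 0 = test value seen */ }

    static int ring_test(void)
    {
        unsigned index;
        int r = wb_get(&index);

        if (r)
            return r; /* nothing to clean up yet */

        r = ring_alloc();
        if (r)
            goto error_free_wb; /* one exit label frees the slot */

        r = poll_result() ? -62 /* -ETIMEDOUT */ : 0;

    error_free_wb:
        wb_free(index);
        return r;
    }

    int main(void) { return ring_test(); }
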
@@ -612,20 +604,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
612 | long r; | 604 | long r; |
613 | 605 | ||
614 | r = amdgpu_device_wb_get(adev, &index); | 606 | r = amdgpu_device_wb_get(adev, &index); |
615 | if (r) { | 607 | if (r) |
616 | dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); | ||
617 | return r; | 608 | return r; |
618 | } | ||
619 | 609 | ||
620 | gpu_addr = adev->wb.gpu_addr + (index * 4); | 610 | gpu_addr = adev->wb.gpu_addr + (index * 4); |
621 | tmp = 0xCAFEDEAD; | 611 | tmp = 0xCAFEDEAD; |
622 | adev->wb.wb[index] = cpu_to_le32(tmp); | 612 | adev->wb.wb[index] = cpu_to_le32(tmp); |
623 | memset(&ib, 0, sizeof(ib)); | 613 | memset(&ib, 0, sizeof(ib)); |
624 | r = amdgpu_ib_get(adev, NULL, 256, &ib); | 614 | r = amdgpu_ib_get(adev, NULL, 256, &ib); |
625 | if (r) { | 615 | if (r) |
626 | DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); | ||
627 | goto err0; | 616 | goto err0; |
628 | } | ||
629 | 617 | ||
630 | ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | | 618 | ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | |
631 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); | 619 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); |
@@ -644,21 +632,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
644 | 632 | ||
645 | r = dma_fence_wait_timeout(f, false, timeout); | 633 | r = dma_fence_wait_timeout(f, false, timeout); |
646 | if (r == 0) { | 634 | if (r == 0) { |
647 | DRM_ERROR("amdgpu: IB test timed out\n"); | ||
648 | r = -ETIMEDOUT; | 635 | r = -ETIMEDOUT; |
649 | goto err1; | 636 | goto err1; |
650 | } else if (r < 0) { | 637 | } else if (r < 0) { |
651 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); | ||
652 | goto err1; | 638 | goto err1; |
653 | } | 639 | } |
654 | tmp = le32_to_cpu(adev->wb.wb[index]); | 640 | tmp = le32_to_cpu(adev->wb.wb[index]); |
655 | if (tmp == 0xDEADBEEF) { | 641 | if (tmp == 0xDEADBEEF) |
656 | DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); | ||
657 | r = 0; | 642 | r = 0; |
658 | } else { | 643 | else |
659 | DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); | ||
660 | r = -EINVAL; | 644 | r = -EINVAL; |
661 | } | ||
662 | 645 | ||
663 | err1: | 646 | err1: |
664 | amdgpu_ib_free(adev, &ib, NULL); | 647 | amdgpu_ib_free(adev, &ib, NULL); |
@@ -760,7 +743,7 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, | |||
760 | */ | 743 | */ |
761 | static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) | 744 | static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) |
762 | { | 745 | { |
763 | struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); | 746 | struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); |
764 | u32 pad_count; | 747 | u32 pad_count; |
765 | int i; | 748 | int i; |
766 | 749 | ||
@@ -1105,8 +1088,14 @@ static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev, | |||
1105 | struct amdgpu_irq_src *source, | 1088 | struct amdgpu_irq_src *source, |
1106 | struct amdgpu_iv_entry *entry) | 1089 | struct amdgpu_iv_entry *entry) |
1107 | { | 1090 | { |
1091 | u8 instance_id, queue_id; | ||
1092 | |||
1108 | DRM_ERROR("Illegal instruction in SDMA command stream\n"); | 1093 | DRM_ERROR("Illegal instruction in SDMA command stream\n"); |
1109 | schedule_work(&adev->reset_work); | 1094 | instance_id = (entry->ring_id & 0x3) >> 0; |
1095 | queue_id = (entry->ring_id & 0xc) >> 2; | ||
1096 | |||
1097 | if (instance_id <= 1 && queue_id == 0) | ||
1098 | drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched); | ||
1110 | return 0; | 1099 | return 0; |
1111 | } | 1100 | } |
1112 | 1101 | ||
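
Instead of scheduling a full GPU reset, the illegal-instruction handler now decodes the faulting queue from ring_id and signals only that ring's scheduler. A standalone sketch of the decode used above:

    #include <stdio.h>
    #include <stdint.h>

    /* The low two bits of ring_id select the SDMA instance, the next two
     * the queue; only gfx queue 0 of instances 0/1 is routed to the
     * scheduler fault handler. */
    static void decode_ring_id(uint8_t ring_id, uint8_t *instance, uint8_t *queue)
    {
        *instance = ring_id & 0x3;
        *queue = (ring_id & 0xc) >> 2;
    }

    int main(void)
    {
        uint8_t instance, queue;

        decode_ring_id(0x5, &instance, &queue); /* instance 1, queue 1 */
        printf("instance=%u queue=%u fault=%s\n", instance, queue,
               (instance <= 1 && queue == 0) ? "yes" : "no");
        return 0;
    }
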
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 6fb3edaba0ec..b6a25f92d566 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | |||
@@ -399,7 +399,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring) | |||
399 | 399 | ||
400 | static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | 400 | static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) |
401 | { | 401 | { |
402 | struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); | 402 | struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); |
403 | int i; | 403 | int i; |
404 | 404 | ||
405 | for (i = 0; i < count; i++) | 405 | for (i = 0; i < count; i++) |
@@ -419,9 +419,12 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | |||
419 | * Schedule an IB in the DMA ring (VI). | 419 | * Schedule an IB in the DMA ring (VI). |
420 | */ | 420 | */ |
421 | static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, | 421 | static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, |
422 | struct amdgpu_job *job, | ||
422 | struct amdgpu_ib *ib, | 423 | struct amdgpu_ib *ib, |
423 | unsigned vmid, bool ctx_switch) | 424 | bool ctx_switch) |
424 | { | 425 | { |
426 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
427 | |||
425 | /* IB packet must end on a 8 DW boundary */ | 428 | /* IB packet must end on a 8 DW boundary */ |
426 | sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); | 429 | sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); |
427 | 430 | ||
@@ -523,8 +526,8 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev) | |||
523 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); | 526 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); |
524 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | 527 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); |
525 | } | 528 | } |
526 | sdma0->ready = false; | 529 | sdma0->sched.ready = false; |
527 | sdma1->ready = false; | 530 | sdma1->sched.ready = false; |
528 | } | 531 | } |
529 | 532 | ||
530 | /** | 533 | /** |
@@ -739,7 +742,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) | |||
739 | /* enable DMA IBs */ | 742 | /* enable DMA IBs */ |
740 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | 743 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); |
741 | 744 | ||
742 | ring->ready = true; | 745 | ring->sched.ready = true; |
743 | } | 746 | } |
744 | 747 | ||
745 | /* unhalt the MEs */ | 748 | /* unhalt the MEs */ |
@@ -749,11 +752,9 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) | |||
749 | 752 | ||
750 | for (i = 0; i < adev->sdma.num_instances; i++) { | 753 | for (i = 0; i < adev->sdma.num_instances; i++) { |
751 | ring = &adev->sdma.instance[i].ring; | 754 | ring = &adev->sdma.instance[i].ring; |
752 | r = amdgpu_ring_test_ring(ring); | 755 | r = amdgpu_ring_test_helper(ring); |
753 | if (r) { | 756 | if (r) |
754 | ring->ready = false; | ||
755 | return r; | 757 | return r; |
756 | } | ||
757 | 758 | ||
758 | if (adev->mman.buffer_funcs_ring == ring) | 759 | if (adev->mman.buffer_funcs_ring == ring) |
759 | amdgpu_ttm_set_buffer_funcs_status(adev, true); | 760 | amdgpu_ttm_set_buffer_funcs_status(adev, true); |
@@ -822,21 +823,16 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) | |||
822 | u64 gpu_addr; | 823 | u64 gpu_addr; |
823 | 824 | ||
824 | r = amdgpu_device_wb_get(adev, &index); | 825 | r = amdgpu_device_wb_get(adev, &index); |
825 | if (r) { | 826 | if (r) |
826 | dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); | ||
827 | return r; | 827 | return r; |
828 | } | ||
829 | 828 | ||
830 | gpu_addr = adev->wb.gpu_addr + (index * 4); | 829 | gpu_addr = adev->wb.gpu_addr + (index * 4); |
831 | tmp = 0xCAFEDEAD; | 830 | tmp = 0xCAFEDEAD; |
832 | adev->wb.wb[index] = cpu_to_le32(tmp); | 831 | adev->wb.wb[index] = cpu_to_le32(tmp); |
833 | 832 | ||
834 | r = amdgpu_ring_alloc(ring, 5); | 833 | r = amdgpu_ring_alloc(ring, 5); |
835 | if (r) { | 834 | if (r) |
836 | DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); | 835 | goto error_free_wb; |
837 | amdgpu_device_wb_free(adev, index); | ||
838 | return r; | ||
839 | } | ||
840 | 836 | ||
841 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | | 837 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | |
842 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); | 838 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); |
@@ -853,15 +849,11 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) | |||
853 | DRM_UDELAY(1); | 849 | DRM_UDELAY(1); |
854 | } | 850 | } |
855 | 851 | ||
856 | if (i < adev->usec_timeout) { | 852 | if (i >= adev->usec_timeout) |
857 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); | 853 | r = -ETIMEDOUT; |
858 | } else { | ||
859 | DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", | ||
860 | ring->idx, tmp); | ||
861 | r = -EINVAL; | ||
862 | } | ||
863 | amdgpu_device_wb_free(adev, index); | ||
864 | 854 | ||
855 | error_free_wb: | ||
856 | amdgpu_device_wb_free(adev, index); | ||
865 | return r; | 857 | return r; |
866 | } | 858 | } |
867 | 859 | ||
@@ -884,20 +876,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
884 | long r; | 876 | long r; |
885 | 877 | ||
886 | r = amdgpu_device_wb_get(adev, &index); | 878 | r = amdgpu_device_wb_get(adev, &index); |
887 | if (r) { | 879 | if (r) |
888 | dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); | ||
889 | return r; | 880 | return r; |
890 | } | ||
891 | 881 | ||
892 | gpu_addr = adev->wb.gpu_addr + (index * 4); | 882 | gpu_addr = adev->wb.gpu_addr + (index * 4); |
893 | tmp = 0xCAFEDEAD; | 883 | tmp = 0xCAFEDEAD; |
894 | adev->wb.wb[index] = cpu_to_le32(tmp); | 884 | adev->wb.wb[index] = cpu_to_le32(tmp); |
895 | memset(&ib, 0, sizeof(ib)); | 885 | memset(&ib, 0, sizeof(ib)); |
896 | r = amdgpu_ib_get(adev, NULL, 256, &ib); | 886 | r = amdgpu_ib_get(adev, NULL, 256, &ib); |
897 | if (r) { | 887 | if (r) |
898 | DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); | ||
899 | goto err0; | 888 | goto err0; |
900 | } | ||
901 | 889 | ||
902 | ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | | 890 | ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | |
903 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); | 891 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); |
@@ -916,21 +904,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
916 | 904 | ||
917 | r = dma_fence_wait_timeout(f, false, timeout); | 905 | r = dma_fence_wait_timeout(f, false, timeout); |
918 | if (r == 0) { | 906 | if (r == 0) { |
919 | DRM_ERROR("amdgpu: IB test timed out\n"); | ||
920 | r = -ETIMEDOUT; | 907 | r = -ETIMEDOUT; |
921 | goto err1; | 908 | goto err1; |
922 | } else if (r < 0) { | 909 | } else if (r < 0) { |
923 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); | ||
924 | goto err1; | 910 | goto err1; |
925 | } | 911 | } |
926 | tmp = le32_to_cpu(adev->wb.wb[index]); | 912 | tmp = le32_to_cpu(adev->wb.wb[index]); |
927 | if (tmp == 0xDEADBEEF) { | 913 | if (tmp == 0xDEADBEEF) |
928 | DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); | ||
929 | r = 0; | 914 | r = 0; |
930 | } else { | 915 | else |
931 | DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); | ||
932 | r = -EINVAL; | 916 | r = -EINVAL; |
933 | } | ||
934 | err1: | 917 | err1: |
935 | amdgpu_ib_free(adev, &ib, NULL); | 918 | amdgpu_ib_free(adev, &ib, NULL); |
936 | dma_fence_put(f); | 919 | dma_fence_put(f); |
@@ -1031,7 +1014,7 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, | |||
1031 | */ | 1014 | */ |
1032 | static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) | 1015 | static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) |
1033 | { | 1016 | { |
1034 | struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); | 1017 | struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); |
1035 | u32 pad_count; | 1018 | u32 pad_count; |
1036 | int i; | 1019 | int i; |
1037 | 1020 | ||
@@ -1440,8 +1423,14 @@ static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev, | |||
1440 | struct amdgpu_irq_src *source, | 1423 | struct amdgpu_irq_src *source, |
1441 | struct amdgpu_iv_entry *entry) | 1424 | struct amdgpu_iv_entry *entry) |
1442 | { | 1425 | { |
1426 | u8 instance_id, queue_id; | ||
1427 | |||
1443 | DRM_ERROR("Illegal instruction in SDMA command stream\n"); | 1428 | DRM_ERROR("Illegal instruction in SDMA command stream\n"); |
1444 | schedule_work(&adev->reset_work); | 1429 | instance_id = (entry->ring_id & 0x3) >> 0; |
1430 | queue_id = (entry->ring_id & 0xc) >> 2; | ||
1431 | |||
1432 | if (instance_id <= 1 && queue_id == 0) | ||
1433 | drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched); | ||
1445 | return 0; | 1434 | return 0; |
1446 | } | 1435 | } |
1447 | 1436 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 7a8c9172d30a..f4490cdd9804 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | |||
@@ -54,6 +54,11 @@ MODULE_FIRMWARE("amdgpu/raven2_sdma.bin"); | |||
54 | #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L | 54 | #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L |
55 | #define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L | 55 | #define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L |
56 | 56 | ||
57 | #define WREG32_SDMA(instance, offset, value) \ | ||
58 | WREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)), value) | ||
59 | #define RREG32_SDMA(instance, offset) \ | ||
60 | RREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset))) | ||
61 | |||
57 | static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev); | 62 | static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev); |
58 | static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev); | 63 | static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev); |
59 | static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev); | 64 | static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev); |
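
The new WREG32_SDMA/RREG32_SDMA helpers fold the per-instance offset lookup into the macro so call sites shrink to instance + register, as the hunks below show. A standalone model, with get_reg_offset() standing in for sdma_v4_0_get_reg_offset():

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t mmio[512];

    static uint32_t get_reg_offset(unsigned instance, uint32_t reg)
    {
        return instance * 256 + reg; /* fake per-instance aperture */
    }

    #define WREG32_SDMA(instance, reg, value) \
        (mmio[get_reg_offset(instance, reg)] = (value))
    #define RREG32_SDMA(instance, reg) \
        (mmio[get_reg_offset(instance, reg)])

    int main(void)
    {
        WREG32_SDMA(1, 0x10, 0xcafe);
        printf("0x%x\n", RREG32_SDMA(1, 0x10));
        return 0;
    }
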
@@ -367,16 +372,11 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring) | |||
367 | wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); | 372 | wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); |
368 | DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr); | 373 | DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr); |
369 | } else { | 374 | } else { |
370 | u32 lowbit, highbit; | 375 | wptr = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI); |
371 | |||
372 | lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2; | ||
373 | highbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2; | ||
374 | |||
375 | DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n", | ||
376 | ring->me, highbit, lowbit); | ||
377 | wptr = highbit; | ||
378 | wptr = wptr << 32; | 376 | wptr = wptr << 32; |
379 | wptr |= lowbit; | 377 | wptr |= RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR); |
378 | DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", | ||
379 | ring->me, wptr); | ||
380 | } | 380 | } |
381 | 381 | ||
382 | return wptr >> 2; | 382 | return wptr >> 2; |
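
The wptr read above now composes the 64-bit pointer from the HI and LO registers first and shifts once at the end, rather than pre-shifting each half. A minimal sketch of that composition:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t compose_wptr(uint32_t hi, uint32_t lo)
    {
        uint64_t wptr = ((uint64_t)hi << 32) | lo;

        return wptr >> 2; /* registers hold byte units, the ring uses dwords */
    }

    int main(void)
    {
        printf("wptr=0x%llx\n", (unsigned long long)compose_wptr(0x1, 0x80));
        return 0;
    }
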
@@ -417,14 +417,67 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring) | |||
417 | lower_32_bits(ring->wptr << 2), | 417 | lower_32_bits(ring->wptr << 2), |
418 | ring->me, | 418 | ring->me, |
419 | upper_32_bits(ring->wptr << 2)); | 419 | upper_32_bits(ring->wptr << 2)); |
420 | WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2)); | 420 | WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR, |
421 | WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); | 421 | lower_32_bits(ring->wptr << 2)); |
422 | WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI, | ||
423 | upper_32_bits(ring->wptr << 2)); | ||
424 | } | ||
425 | } | ||
426 | |||
427 | /** | ||
428 | * sdma_v4_0_page_ring_get_wptr - get the current write pointer | ||
429 | * | ||
430 | * @ring: amdgpu ring pointer | ||
431 | * | ||
432 | * Get the current wptr from the hardware (VEGA10+). | ||
433 | */ | ||
434 | static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring) | ||
435 | { | ||
436 | struct amdgpu_device *adev = ring->adev; | ||
437 | u64 wptr; | ||
438 | |||
439 | if (ring->use_doorbell) { | ||
440 | /* XXX check if swapping is necessary on BE */ | ||
441 | wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); | ||
442 | } else { | ||
443 | wptr = RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI); | ||
444 | wptr = wptr << 32; | ||
445 | wptr |= RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR); | ||
446 | } | ||
447 | |||
448 | return wptr >> 2; | ||
449 | } | ||
450 | |||
451 | /** | ||
452 | * sdma_v4_0_page_ring_set_wptr - commit the write pointer | ||
453 | * | ||
454 | * @ring: amdgpu ring pointer | ||
455 | * | ||
456 | * Write the wptr back to the hardware (VEGA10+). | ||
457 | */ | ||
458 | static void sdma_v4_0_page_ring_set_wptr(struct amdgpu_ring *ring) | ||
459 | { | ||
460 | struct amdgpu_device *adev = ring->adev; | ||
461 | |||
462 | if (ring->use_doorbell) { | ||
463 | u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs]; | ||
464 | |||
465 | /* XXX check if swapping is necessary on BE */ | ||
466 | WRITE_ONCE(*wb, (ring->wptr << 2)); | ||
467 | WDOORBELL64(ring->doorbell_index, ring->wptr << 2); | ||
468 | } else { | ||
469 | uint64_t wptr = ring->wptr << 2; | ||
470 | |||
471 | WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR, | ||
472 | lower_32_bits(wptr)); | ||
473 | WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI, | ||
474 | upper_32_bits(wptr)); | ||
422 | } | 475 | } |
423 | } | 476 | } |
424 | 477 | ||
425 | static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | 478 | static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) |
426 | { | 479 | { |
427 | struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); | 480 | struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); |
428 | int i; | 481 | int i; |
429 | 482 | ||
430 | for (i = 0; i < count; i++) | 483 | for (i = 0; i < count; i++) |
@@ -444,9 +497,12 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) | |||
444 | * Schedule an IB in the DMA ring (VEGA10). | 497 | * Schedule an IB in the DMA ring (VEGA10). |
445 | */ | 498 | */ |
446 | static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring, | 499 | static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring, |
447 | struct amdgpu_ib *ib, | 500 | struct amdgpu_job *job, |
448 | unsigned vmid, bool ctx_switch) | 501 | struct amdgpu_ib *ib, |
502 | bool ctx_switch) | ||
449 | { | 503 | { |
504 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
505 | |||
450 | /* IB packet must end on a 8 DW boundary */ | 506 | /* IB packet must end on a 8 DW boundary */ |
451 | sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); | 507 | sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); |
452 | 508 | ||
@@ -568,16 +624,16 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev) | |||
568 | amdgpu_ttm_set_buffer_funcs_status(adev, false); | 624 | amdgpu_ttm_set_buffer_funcs_status(adev, false); |
569 | 625 | ||
570 | for (i = 0; i < adev->sdma.num_instances; i++) { | 626 | for (i = 0; i < adev->sdma.num_instances; i++) { |
571 | rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); | 627 | rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL); |
572 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); | 628 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); |
573 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); | 629 | WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl); |
574 | ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); | 630 | ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL); |
575 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); | 631 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); |
576 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); | 632 | WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl); |
577 | } | 633 | } |
578 | 634 | ||
579 | sdma0->ready = false; | 635 | sdma0->sched.ready = false; |
580 | sdma1->ready = false; | 636 | sdma1->sched.ready = false; |
581 | } | 637 | } |
582 | 638 | ||
583 | /** | 639 | /** |
@@ -593,6 +649,39 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev) | |||
593 | } | 649 | } |
594 | 650 | ||
595 | /** | 651 | /** |
652 | * sdma_v4_0_page_stop - stop the page async dma engines | ||
653 | * | ||
654 | * @adev: amdgpu_device pointer | ||
655 | * | ||
656 | * Stop the page async dma ring buffers (VEGA10). | ||
657 | */ | ||
658 | static void sdma_v4_0_page_stop(struct amdgpu_device *adev) | ||
659 | { | ||
660 | struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].page; | ||
661 | struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].page; | ||
662 | u32 rb_cntl, ib_cntl; | ||
663 | int i; | ||
664 | |||
665 | if ((adev->mman.buffer_funcs_ring == sdma0) || | ||
666 | (adev->mman.buffer_funcs_ring == sdma1)) | ||
667 | amdgpu_ttm_set_buffer_funcs_status(adev, false); | ||
668 | |||
669 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
670 | rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL); | ||
671 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, | ||
672 | RB_ENABLE, 0); | ||
673 | WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl); | ||
674 | ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL); | ||
675 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, | ||
676 | IB_ENABLE, 0); | ||
677 | WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl); | ||
678 | } | ||
679 | |||
680 | sdma0->sched.ready = false; | ||
681 | sdma1->sched.ready = false; | ||
682 | } | ||
683 | |||
684 | /** | ||
596 | * sdma_v4_0_ctx_switch_enable - enable/disable the async dma engines context switch | 685 | * sdma_v4_0_ctx_switch_enable - enable/disable the async dma engines context switch |
597 | * | 686 | * |
598 | * @adev: amdgpu_device pointer | 687 | * @adev: amdgpu_device pointer |
@@ -630,18 +719,15 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) | |||
630 | } | 719 | } |
631 | 720 | ||
632 | for (i = 0; i < adev->sdma.num_instances; i++) { | 721 | for (i = 0; i < adev->sdma.num_instances; i++) { |
633 | f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); | 722 | f32_cntl = RREG32_SDMA(i, mmSDMA0_CNTL); |
634 | f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, | 723 | f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, |
635 | AUTO_CTXSW_ENABLE, enable ? 1 : 0); | 724 | AUTO_CTXSW_ENABLE, enable ? 1 : 0); |
636 | if (enable && amdgpu_sdma_phase_quantum) { | 725 | if (enable && amdgpu_sdma_phase_quantum) { |
637 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM), | 726 | WREG32_SDMA(i, mmSDMA0_PHASE0_QUANTUM, phase_quantum); |
638 | phase_quantum); | 727 | WREG32_SDMA(i, mmSDMA0_PHASE1_QUANTUM, phase_quantum); |
639 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM), | 728 | WREG32_SDMA(i, mmSDMA0_PHASE2_QUANTUM, phase_quantum); |
640 | phase_quantum); | ||
641 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM), | ||
642 | phase_quantum); | ||
643 | } | 729 | } |
644 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl); | 730 | WREG32_SDMA(i, mmSDMA0_CNTL, f32_cntl); |
645 | } | 731 | } |
646 | 732 | ||
647 | } | 733 | } |
@@ -662,156 +748,217 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable) | |||
662 | if (enable == false) { | 748 | if (enable == false) { |
663 | sdma_v4_0_gfx_stop(adev); | 749 | sdma_v4_0_gfx_stop(adev); |
664 | sdma_v4_0_rlc_stop(adev); | 750 | sdma_v4_0_rlc_stop(adev); |
751 | if (adev->sdma.has_page_queue) | ||
752 | sdma_v4_0_page_stop(adev); | ||
665 | } | 753 | } |
666 | 754 | ||
667 | for (i = 0; i < adev->sdma.num_instances; i++) { | 755 | for (i = 0; i < adev->sdma.num_instances; i++) { |
668 | f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); | 756 | f32_cntl = RREG32_SDMA(i, mmSDMA0_F32_CNTL); |
669 | f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1); | 757 | f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1); |
670 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl); | 758 | WREG32_SDMA(i, mmSDMA0_F32_CNTL, f32_cntl); |
671 | } | 759 | } |
672 | } | 760 | } |
673 | 761 | ||
674 | /** | 762 | /** |
763 | * sdma_v4_0_rb_cntl - compute the RB_CNTL value shared by the gfx and page rings | ||
764 | */ | ||
765 | static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl) | ||
766 | { | ||
767 | /* Set ring buffer size in dwords */ | ||
768 | uint32_t rb_bufsz = order_base_2(ring->ring_size / 4); | ||
769 | |||
770 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); | ||
771 | #ifdef __BIG_ENDIAN | ||
772 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); | ||
773 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, | ||
774 | RPTR_WRITEBACK_SWAP_ENABLE, 1); | ||
775 | #endif | ||
776 | return rb_cntl; | ||
777 | } | ||
778 | |||
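[Editor's note] Since sdma_v4_0_rb_cntl() now centralizes the size computation for both ring types, a quick worked example may help: RB_SIZE holds log2 of the ring size in dwords. A sketch assuming order_base_2() from linux/log2.h:

    #include <linux/log2.h>

    /* a 256 KiB ring is 262144 / 4 = 65536 dwords; order_base_2(65536) == 16,
     * so the RB_SIZE field is programmed to 16 */
    uint32_t rb_bufsz = order_base_2((256 * 1024) / 4);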
779 | /** | ||
675 | * sdma_v4_0_gfx_resume - setup and start the async dma engines | 780 | * sdma_v4_0_gfx_resume - setup and start the async dma engines |
676 | * | 781 | * |
677 | * @adev: amdgpu_device pointer | 782 | * @adev: amdgpu_device pointer |
783 | * @i: instance to resume | ||
678 | * | 784 | * |
679 | * Set up the gfx DMA ring buffers and enable them (VEGA10). | 785 | * Set up the gfx DMA ring buffers and enable them (VEGA10). |
680 | * Returns 0 for success, error for failure. | 786 | * The ring is brought up here; it is tested later from sdma_v4_0_start(). |
681 | */ | 787 | */ |
682 | static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev) | 788 | static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i) |
683 | { | 789 | { |
684 | struct amdgpu_ring *ring; | 790 | struct amdgpu_ring *ring = &adev->sdma.instance[i].ring; |
685 | u32 rb_cntl, ib_cntl, wptr_poll_cntl; | 791 | u32 rb_cntl, ib_cntl, wptr_poll_cntl; |
686 | u32 rb_bufsz; | ||
687 | u32 wb_offset; | 792 | u32 wb_offset; |
688 | u32 doorbell; | 793 | u32 doorbell; |
689 | u32 doorbell_offset; | 794 | u32 doorbell_offset; |
690 | u32 temp; | ||
691 | u64 wptr_gpu_addr; | 795 | u64 wptr_gpu_addr; |
692 | int i, r; | ||
693 | |||
694 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
695 | ring = &adev->sdma.instance[i].ring; | ||
696 | wb_offset = (ring->rptr_offs * 4); | ||
697 | 796 | ||
698 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); | 797 | wb_offset = (ring->rptr_offs * 4); |
699 | 798 | ||
700 | /* Set ring buffer size in dwords */ | 799 | rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL); |
701 | rb_bufsz = order_base_2(ring->ring_size / 4); | 800 | rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl); |
702 | rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); | 801 | WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl); |
703 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); | ||
704 | #ifdef __BIG_ENDIAN | ||
705 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); | ||
706 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, | ||
707 | RPTR_WRITEBACK_SWAP_ENABLE, 1); | ||
708 | #endif | ||
709 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); | ||
710 | 802 | ||
711 | /* Initialize the ring buffer's read and write pointers */ | 803 | /* Initialize the ring buffer's read and write pointers */ |
712 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0); | 804 | WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR, 0); |
713 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0); | 805 | WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_HI, 0); |
714 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0); | 806 | WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR, 0); |
715 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0); | 807 | WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_HI, 0); |
716 | 808 | ||
717 | /* set the wb address whether it's enabled or not */ | 809 | /* set the wb address whether it's enabled or not */ |
718 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), | 810 | WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_HI, |
719 | upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); | 811 | upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); |
720 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), | 812 | WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_LO, |
721 | lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); | 813 | lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); |
722 | 814 | ||
723 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); | 815 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, |
816 | RPTR_WRITEBACK_ENABLE, 1); | ||
724 | 817 | ||
725 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8); | 818 | WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE, ring->gpu_addr >> 8); |
726 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40); | 819 | WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE_HI, ring->gpu_addr >> 40); |
727 | 820 | ||
728 | ring->wptr = 0; | 821 | ring->wptr = 0; |
729 | 822 | ||
730 | /* before programming wptr to a smaller value, minor_ptr_update must be set first */ | 823 | /* before programming wptr to a smaller value, minor_ptr_update must be set first */ |
731 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); | 824 | WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 1); |
732 | 825 | ||
733 | if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ | 826 | doorbell = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL); |
734 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2); | 827 | doorbell_offset = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET); |
735 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2); | ||
736 | } | ||
737 | 828 | ||
738 | doorbell = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); | 829 | doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, |
739 | doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET)); | 830 | ring->use_doorbell); |
740 | 831 | doorbell_offset = REG_SET_FIELD(doorbell_offset, | |
741 | if (ring->use_doorbell) { | 832 | SDMA0_GFX_DOORBELL_OFFSET, |
742 | doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); | ||
743 | doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET, | ||
744 | OFFSET, ring->doorbell_index); | 833 | OFFSET, ring->doorbell_index); |
745 | } else { | 834 | WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL, doorbell); |
746 | doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); | 835 | WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET, doorbell_offset); |
747 | } | 836 | adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, |
748 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); | 837 | ring->doorbell_index); |
749 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset); | 838 | |
750 | adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, | 839 | sdma_v4_0_ring_set_wptr(ring); |
751 | ring->doorbell_index); | 840 | |
841 | /* set minor_ptr_update to 0 after wptr is programmed */ | ||
842 | WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 0); | ||
843 | |||
844 | /* setup the wptr shadow polling */ | ||
845 | wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); | ||
846 | WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO, | ||
847 | lower_32_bits(wptr_gpu_addr)); | ||
848 | WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI, | ||
849 | upper_32_bits(wptr_gpu_addr)); | ||
850 | wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL); | ||
851 | wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, | ||
852 | SDMA0_GFX_RB_WPTR_POLL_CNTL, | ||
853 | F32_POLL_ENABLE, amdgpu_sriov_vf(adev)); | ||
854 | WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl); | ||
855 | |||
856 | /* enable DMA RB */ | ||
857 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); | ||
858 | WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl); | ||
859 | |||
860 | ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL); | ||
861 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); | ||
862 | #ifdef __BIG_ENDIAN | ||
863 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); | ||
864 | #endif | ||
865 | /* enable DMA IBs */ | ||
866 | WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl); | ||
752 | 867 | ||
753 | if (amdgpu_sriov_vf(adev)) | 868 | ring->sched.ready = true; |
754 | sdma_v4_0_ring_set_wptr(ring); | 869 | } |
755 | 870 | ||
756 | /* set minor_ptr_update to 0 after wptr programed */ | 871 | /** |
757 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); | 872 | * sdma_v4_0_page_resume - setup and start the page async dma engines |
873 | * | ||
874 | * @adev: amdgpu_device pointer | ||
875 | * @i: instance to resume | ||
876 | * | ||
877 | * Set up the page DMA ring buffers and enable them (VEGA10). | ||
878 | * The ring is brought up here; it is tested later from sdma_v4_0_start(). | ||
879 | */ | ||
880 | static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i) | ||
881 | { | ||
882 | struct amdgpu_ring *ring = &adev->sdma.instance[i].page; | ||
883 | u32 rb_cntl, ib_cntl, wptr_poll_cntl; | ||
884 | u32 wb_offset; | ||
885 | u32 doorbell; | ||
886 | u32 doorbell_offset; | ||
887 | u64 wptr_gpu_addr; | ||
758 | 888 | ||
759 | /* set utc l1 enable flag always to 1 */ | 889 | wb_offset = (ring->rptr_offs * 4); |
760 | temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL)); | ||
761 | temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); | ||
762 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); | ||
763 | 890 | ||
764 | if (!amdgpu_sriov_vf(adev)) { | 891 | rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL); |
765 | /* unhalt engine */ | 892 | rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl); |
766 | temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); | 893 | WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl); |
767 | temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); | ||
768 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp); | ||
769 | } | ||
770 | 894 | ||
771 | /* setup the wptr shadow polling */ | 895 | /* Initialize the ring buffer's read and write pointers */ |
772 | wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); | 896 | WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR, 0); |
773 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), | 897 | WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_HI, 0); |
774 | lower_32_bits(wptr_gpu_addr)); | 898 | WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR, 0); |
775 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), | 899 | WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_HI, 0); |
776 | upper_32_bits(wptr_gpu_addr)); | ||
777 | wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); | ||
778 | if (amdgpu_sriov_vf(adev)) | ||
779 | wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1); | ||
780 | else | ||
781 | wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0); | ||
782 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl); | ||
783 | 900 | ||
784 | /* enable DMA RB */ | 901 | /* set the wb address whether it's enabled or not */ |
785 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); | 902 | WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_HI, |
786 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); | 903 | upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); |
904 | WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_LO, | ||
905 | lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); | ||
787 | 906 | ||
788 | ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); | 907 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, |
789 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); | 908 | RPTR_WRITEBACK_ENABLE, 1); |
790 | #ifdef __BIG_ENDIAN | ||
791 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); | ||
792 | #endif | ||
793 | /* enable DMA IBs */ | ||
794 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); | ||
795 | 909 | ||
796 | ring->ready = true; | 910 | WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE, ring->gpu_addr >> 8); |
911 | WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE_HI, ring->gpu_addr >> 40); | ||
797 | 912 | ||
798 | if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ | 913 | ring->wptr = 0; |
799 | sdma_v4_0_ctx_switch_enable(adev, true); | ||
800 | sdma_v4_0_enable(adev, true); | ||
801 | } | ||
802 | 914 | ||
803 | r = amdgpu_ring_test_ring(ring); | 915 | /* before programming wptr to a smaller value, minor_ptr_update must be set first */ |
804 | if (r) { | 916 | WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 1); |
805 | ring->ready = false; | ||
806 | return r; | ||
807 | } | ||
808 | 917 | ||
809 | if (adev->mman.buffer_funcs_ring == ring) | 918 | doorbell = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL); |
810 | amdgpu_ttm_set_buffer_funcs_status(adev, true); | 919 | doorbell_offset = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET); |
811 | 920 | ||
812 | } | 921 | doorbell = REG_SET_FIELD(doorbell, SDMA0_PAGE_DOORBELL, ENABLE, |
922 | ring->use_doorbell); | ||
923 | doorbell_offset = REG_SET_FIELD(doorbell_offset, | ||
924 | SDMA0_PAGE_DOORBELL_OFFSET, | ||
925 | OFFSET, ring->doorbell_index); | ||
926 | WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell); | ||
927 | WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset); | ||
928 | /* TODO: enable doorbell support */ | ||
929 | /*adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, | ||
930 | ring->doorbell_index);*/ | ||
931 | |||
932 | sdma_v4_0_ring_set_wptr(ring); | ||
933 | |||
934 | /* set minor_ptr_update to 0 after wptr is programmed */ | ||
935 | WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0); | ||
936 | |||
937 | /* setup the wptr shadow polling */ | ||
938 | wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); | ||
939 | WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO, | ||
940 | lower_32_bits(wptr_gpu_addr)); | ||
941 | WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI, | ||
942 | upper_32_bits(wptr_gpu_addr)); | ||
943 | wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL); | ||
944 | wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, | ||
945 | SDMA0_PAGE_RB_WPTR_POLL_CNTL, | ||
946 | F32_POLL_ENABLE, amdgpu_sriov_vf(adev)); | ||
947 | WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl); | ||
948 | |||
949 | /* enable DMA RB */ | ||
950 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, RB_ENABLE, 1); | ||
951 | WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl); | ||
952 | |||
953 | ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL); | ||
954 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_ENABLE, 1); | ||
955 | #ifdef __BIG_ENDIAN | ||
956 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1); | ||
957 | #endif | ||
958 | /* enable DMA IBs */ | ||
959 | WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl); | ||
813 | 960 | ||
814 | return 0; | 961 | ring->sched.ready = true; |
815 | } | 962 | } |
816 | 963 | ||
817 | static void | 964 | static void |
@@ -922,12 +1069,14 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev) | |||
922 | (adev->sdma.instance[i].fw->data + | 1069 | (adev->sdma.instance[i].fw->data + |
923 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | 1070 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); |
924 | 1071 | ||
925 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0); | 1072 | WREG32_SDMA(i, mmSDMA0_UCODE_ADDR, 0); |
926 | 1073 | ||
927 | for (j = 0; j < fw_size; j++) | 1074 | for (j = 0; j < fw_size; j++) |
928 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++)); | 1075 | WREG32_SDMA(i, mmSDMA0_UCODE_DATA, |
1076 | le32_to_cpup(fw_data++)); | ||
929 | 1077 | ||
930 | WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version); | 1078 | WREG32_SDMA(i, mmSDMA0_UCODE_ADDR, |
1079 | adev->sdma.instance[i].fw_version); | ||
931 | } | 1080 | } |
932 | 1081 | ||
933 | return 0; | 1082 | return 0; |
@@ -943,33 +1092,78 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev) | |||
943 | */ | 1092 | */ |
944 | static int sdma_v4_0_start(struct amdgpu_device *adev) | 1093 | static int sdma_v4_0_start(struct amdgpu_device *adev) |
945 | { | 1094 | { |
946 | int r = 0; | 1095 | struct amdgpu_ring *ring; |
1096 | int i, r; | ||
947 | 1097 | ||
948 | if (amdgpu_sriov_vf(adev)) { | 1098 | if (amdgpu_sriov_vf(adev)) { |
949 | sdma_v4_0_ctx_switch_enable(adev, false); | 1099 | sdma_v4_0_ctx_switch_enable(adev, false); |
950 | sdma_v4_0_enable(adev, false); | 1100 | sdma_v4_0_enable(adev, false); |
1101 | } else { | ||
951 | 1102 | ||
952 | /* set RB registers */ | 1103 | if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { |
953 | r = sdma_v4_0_gfx_resume(adev); | 1104 | r = sdma_v4_0_load_microcode(adev); |
954 | return r; | 1105 | if (r) |
1106 | return r; | ||
1107 | } | ||
1108 | |||
1109 | /* unhalt the MEs */ | ||
1110 | sdma_v4_0_enable(adev, true); | ||
1111 | /* enable sdma ring preemption */ | ||
1112 | sdma_v4_0_ctx_switch_enable(adev, true); | ||
1113 | } | ||
1114 | |||
1115 | /* start the gfx rings and rlc compute queues */ | ||
1116 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
1117 | uint32_t temp; | ||
1118 | |||
1119 | WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0); | ||
1120 | sdma_v4_0_gfx_resume(adev, i); | ||
1121 | if (adev->sdma.has_page_queue) | ||
1122 | sdma_v4_0_page_resume(adev, i); | ||
1123 | |||
1124 | /* set utc l1 enable flag always to 1 */ | ||
1125 | temp = RREG32_SDMA(i, mmSDMA0_CNTL); | ||
1126 | temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1); | ||
1127 | WREG32_SDMA(i, mmSDMA0_CNTL, temp); | ||
1128 | |||
1129 | if (!amdgpu_sriov_vf(adev)) { | ||
1130 | /* unhalt engine */ | ||
1131 | temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL); | ||
1132 | temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); | ||
1133 | WREG32_SDMA(i, mmSDMA0_F32_CNTL, temp); | ||
1134 | } | ||
955 | } | 1135 | } |
956 | 1136 | ||
957 | if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { | 1137 | if (amdgpu_sriov_vf(adev)) { |
958 | r = sdma_v4_0_load_microcode(adev); | 1138 | sdma_v4_0_ctx_switch_enable(adev, true); |
1139 | sdma_v4_0_enable(adev, true); | ||
1140 | } else { | ||
1141 | r = sdma_v4_0_rlc_resume(adev); | ||
959 | if (r) | 1142 | if (r) |
960 | return r; | 1143 | return r; |
961 | } | 1144 | } |
962 | 1145 | ||
963 | /* unhalt the MEs */ | 1146 | for (i = 0; i < adev->sdma.num_instances; i++) { |
964 | sdma_v4_0_enable(adev, true); | 1147 | ring = &adev->sdma.instance[i].ring; |
965 | /* enable sdma ring preemption */ | ||
966 | sdma_v4_0_ctx_switch_enable(adev, true); | ||
967 | 1148 | ||
968 | /* start the gfx rings and rlc compute queues */ | 1149 | r = amdgpu_ring_test_helper(ring); |
969 | r = sdma_v4_0_gfx_resume(adev); | 1150 | if (r) |
970 | if (r) | 1151 | return r; |
971 | return r; | 1152 | |
972 | r = sdma_v4_0_rlc_resume(adev); | 1153 | if (adev->sdma.has_page_queue) { |
1154 | struct amdgpu_ring *page = &adev->sdma.instance[i].page; | ||
1155 | |||
1156 | r = amdgpu_ring_test_helper(page); | ||
1157 | if (r) | ||
1158 | return r; | ||
1159 | |||
1160 | if (adev->mman.buffer_funcs_ring == page) | ||
1161 | amdgpu_ttm_set_buffer_funcs_status(adev, true); | ||
1162 | } | ||
1163 | |||
1164 | if (adev->mman.buffer_funcs_ring == ring) | ||
1165 | amdgpu_ttm_set_buffer_funcs_status(adev, true); | ||
1166 | } | ||
973 | 1167 | ||
974 | return r; | 1168 | return r; |
975 | } | 1169 | } |
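[Editor's note] The start path above drops the open-coded ready-flag handling around amdgpu_ring_test_ring() in favor of amdgpu_ring_test_helper(). The helper is defined elsewhere in this series (amdgpu_ring.c, not in this hunk); a sketch consistent with the sched.ready conversions seen throughout this diff:

    int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
    {
            int r = amdgpu_ring_test_ring(ring);

            /* centralize the readiness bookkeeping every caller used to duplicate */
            ring->sched.ready = !r;
            return r;
    }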
@@ -993,21 +1187,16 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring) | |||
993 | u64 gpu_addr; | 1187 | u64 gpu_addr; |
994 | 1188 | ||
995 | r = amdgpu_device_wb_get(adev, &index); | 1189 | r = amdgpu_device_wb_get(adev, &index); |
996 | if (r) { | 1190 | if (r) |
997 | dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); | ||
998 | return r; | 1191 | return r; |
999 | } | ||
1000 | 1192 | ||
1001 | gpu_addr = adev->wb.gpu_addr + (index * 4); | 1193 | gpu_addr = adev->wb.gpu_addr + (index * 4); |
1002 | tmp = 0xCAFEDEAD; | 1194 | tmp = 0xCAFEDEAD; |
1003 | adev->wb.wb[index] = cpu_to_le32(tmp); | 1195 | adev->wb.wb[index] = cpu_to_le32(tmp); |
1004 | 1196 | ||
1005 | r = amdgpu_ring_alloc(ring, 5); | 1197 | r = amdgpu_ring_alloc(ring, 5); |
1006 | if (r) { | 1198 | if (r) |
1007 | DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); | 1199 | goto error_free_wb; |
1008 | amdgpu_device_wb_free(adev, index); | ||
1009 | return r; | ||
1010 | } | ||
1011 | 1200 | ||
1012 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | | 1201 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | |
1013 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); | 1202 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); |
@@ -1024,15 +1213,11 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring) | |||
1024 | DRM_UDELAY(1); | 1213 | DRM_UDELAY(1); |
1025 | } | 1214 | } |
1026 | 1215 | ||
1027 | if (i < adev->usec_timeout) { | 1216 | if (i >= adev->usec_timeout) |
1028 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); | 1217 | r = -ETIMEDOUT; |
1029 | } else { | ||
1030 | DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", | ||
1031 | ring->idx, tmp); | ||
1032 | r = -EINVAL; | ||
1033 | } | ||
1034 | amdgpu_device_wb_free(adev, index); | ||
1035 | 1218 | ||
1219 | error_free_wb: | ||
1220 | amdgpu_device_wb_free(adev, index); | ||
1036 | return r; | 1221 | return r; |
1037 | } | 1222 | } |
1038 | 1223 | ||
@@ -1055,20 +1240,16 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
1055 | u64 gpu_addr; | 1240 | u64 gpu_addr; |
1056 | 1241 | ||
1057 | r = amdgpu_device_wb_get(adev, &index); | 1242 | r = amdgpu_device_wb_get(adev, &index); |
1058 | if (r) { | 1243 | if (r) |
1059 | dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); | ||
1060 | return r; | 1244 | return r; |
1061 | } | ||
1062 | 1245 | ||
1063 | gpu_addr = adev->wb.gpu_addr + (index * 4); | 1246 | gpu_addr = adev->wb.gpu_addr + (index * 4); |
1064 | tmp = 0xCAFEDEAD; | 1247 | tmp = 0xCAFEDEAD; |
1065 | adev->wb.wb[index] = cpu_to_le32(tmp); | 1248 | adev->wb.wb[index] = cpu_to_le32(tmp); |
1066 | memset(&ib, 0, sizeof(ib)); | 1249 | memset(&ib, 0, sizeof(ib)); |
1067 | r = amdgpu_ib_get(adev, NULL, 256, &ib); | 1250 | r = amdgpu_ib_get(adev, NULL, 256, &ib); |
1068 | if (r) { | 1251 | if (r) |
1069 | DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); | ||
1070 | goto err0; | 1252 | goto err0; |
1071 | } | ||
1072 | 1253 | ||
1073 | ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | | 1254 | ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | |
1074 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); | 1255 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); |
@@ -1087,21 +1268,17 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
1087 | 1268 | ||
1088 | r = dma_fence_wait_timeout(f, false, timeout); | 1269 | r = dma_fence_wait_timeout(f, false, timeout); |
1089 | if (r == 0) { | 1270 | if (r == 0) { |
1090 | DRM_ERROR("amdgpu: IB test timed out\n"); | ||
1091 | r = -ETIMEDOUT; | 1271 | r = -ETIMEDOUT; |
1092 | goto err1; | 1272 | goto err1; |
1093 | } else if (r < 0) { | 1273 | } else if (r < 0) { |
1094 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); | ||
1095 | goto err1; | 1274 | goto err1; |
1096 | } | 1275 | } |
1097 | tmp = le32_to_cpu(adev->wb.wb[index]); | 1276 | tmp = le32_to_cpu(adev->wb.wb[index]); |
1098 | if (tmp == 0xDEADBEEF) { | 1277 | if (tmp == 0xDEADBEEF) |
1099 | DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); | ||
1100 | r = 0; | 1278 | r = 0; |
1101 | } else { | 1279 | else |
1102 | DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); | ||
1103 | r = -EINVAL; | 1280 | r = -EINVAL; |
1104 | } | 1281 | |
1105 | err1: | 1282 | err1: |
1106 | amdgpu_ib_free(adev, &ib, NULL); | 1283 | amdgpu_ib_free(adev, &ib, NULL); |
1107 | dma_fence_put(f); | 1284 | dma_fence_put(f); |
@@ -1206,7 +1383,7 @@ static void sdma_v4_0_vm_set_pte_pde(struct amdgpu_ib *ib, | |||
1206 | */ | 1383 | */ |
1207 | static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) | 1384 | static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) |
1208 | { | 1385 | { |
1209 | struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring); | 1386 | struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring); |
1210 | u32 pad_count; | 1387 | u32 pad_count; |
1211 | int i; | 1388 | int i; |
1212 | 1389 | ||
@@ -1276,10 +1453,18 @@ static int sdma_v4_0_early_init(void *handle) | |||
1276 | { | 1453 | { |
1277 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1454 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1278 | 1455 | ||
1279 | if (adev->asic_type == CHIP_RAVEN) | 1456 | if (adev->asic_type == CHIP_RAVEN) { |
1280 | adev->sdma.num_instances = 1; | 1457 | adev->sdma.num_instances = 1; |
1281 | else | 1458 | adev->sdma.has_page_queue = false; |
1459 | } else { | ||
1282 | adev->sdma.num_instances = 2; | 1460 | adev->sdma.num_instances = 2; |
1461 | /* TODO: Page queue breaks driver reload under SRIOV */ | ||
1462 | if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf((adev))) | ||
1463 | adev->sdma.has_page_queue = false; | ||
1464 | else if (adev->asic_type != CHIP_VEGA20 && | ||
1465 | adev->asic_type != CHIP_VEGA12) | ||
1466 | adev->sdma.has_page_queue = true; | ||
1467 | } | ||
1283 | 1468 | ||
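[Editor's note] Condensing the branches above: assuming sdma_v4_0 only drives RAVEN, VEGA10, VEGA12 and VEGA20 at this point, the page queue ends up enabled only on bare-metal VEGA10. An equivalent predicate, as a sketch (not code from the patch):

    /* equivalent under the asic-set assumption stated above */
    adev->sdma.has_page_queue = (adev->asic_type == CHIP_VEGA10) &&
                                !amdgpu_sriov_vf(adev);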
1284 | sdma_v4_0_set_ring_funcs(adev); | 1469 | sdma_v4_0_set_ring_funcs(adev); |
1285 | sdma_v4_0_set_buffer_funcs(adev); | 1470 | sdma_v4_0_set_buffer_funcs(adev); |
@@ -1340,6 +1525,21 @@ static int sdma_v4_0_sw_init(void *handle) | |||
1340 | AMDGPU_SDMA_IRQ_TRAP1); | 1525 | AMDGPU_SDMA_IRQ_TRAP1); |
1341 | if (r) | 1526 | if (r) |
1342 | return r; | 1527 | return r; |
1528 | |||
1529 | if (adev->sdma.has_page_queue) { | ||
1530 | ring = &adev->sdma.instance[i].page; | ||
1531 | ring->ring_obj = NULL; | ||
1532 | ring->use_doorbell = false; | ||
1533 | |||
1534 | sprintf(ring->name, "page%d", i); | ||
1535 | r = amdgpu_ring_init(adev, ring, 1024, | ||
1536 | &adev->sdma.trap_irq, | ||
1537 | (i == 0) ? | ||
1538 | AMDGPU_SDMA_IRQ_TRAP0 : | ||
1539 | AMDGPU_SDMA_IRQ_TRAP1); | ||
1540 | if (r) | ||
1541 | return r; | ||
1542 | } | ||
1343 | } | 1543 | } |
1344 | 1544 | ||
1345 | return r; | 1545 | return r; |
@@ -1350,8 +1550,11 @@ static int sdma_v4_0_sw_fini(void *handle) | |||
1350 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1550 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1351 | int i; | 1551 | int i; |
1352 | 1552 | ||
1353 | for (i = 0; i < adev->sdma.num_instances; i++) | 1553 | for (i = 0; i < adev->sdma.num_instances; i++) { |
1354 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); | 1554 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); |
1555 | if (adev->sdma.has_page_queue) | ||
1556 | amdgpu_ring_fini(&adev->sdma.instance[i].page); | ||
1557 | } | ||
1355 | 1558 | ||
1356 | for (i = 0; i < adev->sdma.num_instances; i++) { | 1559 | for (i = 0; i < adev->sdma.num_instances; i++) { |
1357 | release_firmware(adev->sdma.instance[i].fw); | 1560 | release_firmware(adev->sdma.instance[i].fw); |
@@ -1414,7 +1617,7 @@ static bool sdma_v4_0_is_idle(void *handle) | |||
1414 | u32 i; | 1617 | u32 i; |
1415 | 1618 | ||
1416 | for (i = 0; i < adev->sdma.num_instances; i++) { | 1619 | for (i = 0; i < adev->sdma.num_instances; i++) { |
1417 | u32 tmp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG)); | 1620 | u32 tmp = RREG32_SDMA(i, mmSDMA0_STATUS_REG); |
1418 | 1621 | ||
1419 | if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) | 1622 | if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) |
1420 | return false; | 1623 | return false; |
@@ -1430,8 +1633,8 @@ static int sdma_v4_0_wait_for_idle(void *handle) | |||
1430 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1633 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1431 | 1634 | ||
1432 | for (i = 0; i < adev->usec_timeout; i++) { | 1635 | for (i = 0; i < adev->usec_timeout; i++) { |
1433 | sdma0 = RREG32(sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG)); | 1636 | sdma0 = RREG32_SDMA(0, mmSDMA0_STATUS_REG); |
1434 | sdma1 = RREG32(sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG)); | 1637 | sdma1 = RREG32_SDMA(1, mmSDMA0_STATUS_REG); |
1435 | 1638 | ||
1436 | if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK) | 1639 | if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK) |
1437 | return 0; | 1640 | return 0; |
@@ -1452,16 +1655,13 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev, | |||
1452 | unsigned type, | 1655 | unsigned type, |
1453 | enum amdgpu_interrupt_state state) | 1656 | enum amdgpu_interrupt_state state) |
1454 | { | 1657 | { |
1658 | unsigned int instance = (type == AMDGPU_SDMA_IRQ_TRAP0) ? 0 : 1; | ||
1455 | u32 sdma_cntl; | 1659 | u32 sdma_cntl; |
1456 | 1660 | ||
1457 | u32 reg_offset = (type == AMDGPU_SDMA_IRQ_TRAP0) ? | 1661 | sdma_cntl = RREG32_SDMA(instance, mmSDMA0_CNTL); |
1458 | sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) : | ||
1459 | sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_CNTL); | ||
1460 | |||
1461 | sdma_cntl = RREG32(reg_offset); | ||
1462 | sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, | 1662 | sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, |
1463 | state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); | 1663 | state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); |
1464 | WREG32(reg_offset, sdma_cntl); | 1664 | WREG32_SDMA(instance, mmSDMA0_CNTL, sdma_cntl); |
1465 | 1665 | ||
1466 | return 0; | 1666 | return 0; |
1467 | } | 1667 | } |
@@ -1470,39 +1670,32 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev, | |||
1470 | struct amdgpu_irq_src *source, | 1670 | struct amdgpu_irq_src *source, |
1471 | struct amdgpu_iv_entry *entry) | 1671 | struct amdgpu_iv_entry *entry) |
1472 | { | 1672 | { |
1673 | uint32_t instance; | ||
1674 | |||
1473 | DRM_DEBUG("IH: SDMA trap\n"); | 1675 | DRM_DEBUG("IH: SDMA trap\n"); |
1474 | switch (entry->client_id) { | 1676 | switch (entry->client_id) { |
1475 | case SOC15_IH_CLIENTID_SDMA0: | 1677 | case SOC15_IH_CLIENTID_SDMA0: |
1476 | switch (entry->ring_id) { | 1678 | instance = 0; |
1477 | case 0: | ||
1478 | amdgpu_fence_process(&adev->sdma.instance[0].ring); | ||
1479 | break; | ||
1480 | case 1: | ||
1481 | /* XXX compute */ | ||
1482 | break; | ||
1483 | case 2: | ||
1484 | /* XXX compute */ | ||
1485 | break; | ||
1486 | case 3: | ||
1487 | /* XXX page queue*/ | ||
1488 | break; | ||
1489 | } | ||
1490 | break; | 1679 | break; |
1491 | case SOC15_IH_CLIENTID_SDMA1: | 1680 | case SOC15_IH_CLIENTID_SDMA1: |
1492 | switch (entry->ring_id) { | 1681 | instance = 1; |
1493 | case 0: | 1682 | break; |
1494 | amdgpu_fence_process(&adev->sdma.instance[1].ring); | 1683 | default: |
1495 | break; | 1684 | return 0; |
1496 | case 1: | 1685 | } |
1497 | /* XXX compute */ | 1686 | |
1498 | break; | 1687 | switch (entry->ring_id) { |
1499 | case 2: | 1688 | case 0: |
1500 | /* XXX compute */ | 1689 | amdgpu_fence_process(&adev->sdma.instance[instance].ring); |
1501 | break; | 1690 | break; |
1502 | case 3: | 1691 | case 1: |
1503 | /* XXX page queue*/ | 1692 | /* XXX compute */ |
1504 | break; | 1693 | break; |
1505 | } | 1694 | case 2: |
1695 | /* XXX compute */ | ||
1696 | break; | ||
1697 | case 3: | ||
1698 | amdgpu_fence_process(&adev->sdma.instance[instance].page); | ||
1506 | break; | 1699 | break; |
1507 | } | 1700 | } |
1508 | return 0; | 1701 | return 0; |
@@ -1512,12 +1705,29 @@ static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev, | |||
1512 | struct amdgpu_irq_src *source, | 1705 | struct amdgpu_irq_src *source, |
1513 | struct amdgpu_iv_entry *entry) | 1706 | struct amdgpu_iv_entry *entry) |
1514 | { | 1707 | { |
1708 | int instance; | ||
1709 | |||
1515 | DRM_ERROR("Illegal instruction in SDMA command stream\n"); | 1710 | DRM_ERROR("Illegal instruction in SDMA command stream\n"); |
1516 | schedule_work(&adev->reset_work); | 1711 | |
1712 | switch (entry->client_id) { | ||
1713 | case SOC15_IH_CLIENTID_SDMA0: | ||
1714 | instance = 0; | ||
1715 | break; | ||
1716 | case SOC15_IH_CLIENTID_SDMA1: | ||
1717 | instance = 1; | ||
1718 | break; | ||
1719 | default: | ||
1720 | return 0; | ||
1721 | } | ||
1722 | |||
1723 | switch (entry->ring_id) { | ||
1724 | case 0: | ||
1725 | drm_sched_fault(&adev->sdma.instance[instance].ring.sched); | ||
1726 | break; | ||
1727 | } | ||
1517 | return 0; | 1728 | return 0; |
1518 | } | 1729 | } |
1519 | 1730 | ||
1520 | |||
1521 | static void sdma_v4_0_update_medium_grain_clock_gating( | 1731 | static void sdma_v4_0_update_medium_grain_clock_gating( |
1522 | struct amdgpu_device *adev, | 1732 | struct amdgpu_device *adev, |
1523 | bool enable) | 1733 | bool enable) |
@@ -1730,6 +1940,38 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = { | |||
1730 | .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, | 1940 | .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, |
1731 | }; | 1941 | }; |
1732 | 1942 | ||
1943 | static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = { | ||
1944 | .type = AMDGPU_RING_TYPE_SDMA, | ||
1945 | .align_mask = 0xf, | ||
1946 | .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), | ||
1947 | .support_64bit_ptrs = true, | ||
1948 | .vmhub = AMDGPU_MMHUB, | ||
1949 | .get_rptr = sdma_v4_0_ring_get_rptr, | ||
1950 | .get_wptr = sdma_v4_0_page_ring_get_wptr, | ||
1951 | .set_wptr = sdma_v4_0_page_ring_set_wptr, | ||
1952 | .emit_frame_size = | ||
1953 | 6 + /* sdma_v4_0_ring_emit_hdp_flush */ | ||
1954 | 3 + /* hdp invalidate */ | ||
1955 | 6 + /* sdma_v4_0_ring_emit_pipeline_sync */ | ||
1956 | /* sdma_v4_0_ring_emit_vm_flush */ | ||
1957 | SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + | ||
1958 | SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 + | ||
1959 | 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */ | ||
1960 | .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */ | ||
1961 | .emit_ib = sdma_v4_0_ring_emit_ib, | ||
1962 | .emit_fence = sdma_v4_0_ring_emit_fence, | ||
1963 | .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync, | ||
1964 | .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush, | ||
1965 | .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush, | ||
1966 | .test_ring = sdma_v4_0_ring_test_ring, | ||
1967 | .test_ib = sdma_v4_0_ring_test_ib, | ||
1968 | .insert_nop = sdma_v4_0_ring_insert_nop, | ||
1969 | .pad_ib = sdma_v4_0_ring_pad_ib, | ||
1970 | .emit_wreg = sdma_v4_0_ring_emit_wreg, | ||
1971 | .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait, | ||
1972 | .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, | ||
1973 | }; | ||
1974 | |||
1733 | static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev) | 1975 | static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev) |
1734 | { | 1976 | { |
1735 | int i; | 1977 | int i; |
@@ -1737,6 +1979,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev) | |||
1737 | for (i = 0; i < adev->sdma.num_instances; i++) { | 1979 | for (i = 0; i < adev->sdma.num_instances; i++) { |
1738 | adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs; | 1980 | adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs; |
1739 | adev->sdma.instance[i].ring.me = i; | 1981 | adev->sdma.instance[i].ring.me = i; |
1982 | if (adev->sdma.has_page_queue) { | ||
1983 | adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs; | ||
1984 | adev->sdma.instance[i].page.me = i; | ||
1985 | } | ||
1740 | } | 1986 | } |
1741 | } | 1987 | } |
1742 | 1988 | ||
@@ -1818,7 +2064,10 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = { | |||
1818 | static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev) | 2064 | static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev) |
1819 | { | 2065 | { |
1820 | adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs; | 2066 | adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs; |
1821 | adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; | 2067 | if (adev->sdma.has_page_queue) |
2068 | adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page; | ||
2069 | else | ||
2070 | adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; | ||
1822 | } | 2071 | } |
1823 | 2072 | ||
1824 | static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = { | 2073 | static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = { |
@@ -1836,7 +2085,10 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev) | |||
1836 | 2085 | ||
1837 | adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs; | 2086 | adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs; |
1838 | for (i = 0; i < adev->sdma.num_instances; i++) { | 2087 | for (i = 0; i < adev->sdma.num_instances; i++) { |
1839 | sched = &adev->sdma.instance[i].ring.sched; | 2088 | if (adev->sdma.has_page_queue) |
2089 | sched = &adev->sdma.instance[i].page.sched; | ||
2090 | else | ||
2091 | sched = &adev->sdma.instance[i].ring.sched; | ||
1840 | adev->vm_manager.vm_pte_rqs[i] = | 2092 | adev->vm_manager.vm_pte_rqs[i] = |
1841 | &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; | 2093 | &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; |
1842 | } | 2094 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index adbaea6da0d7..b6e473134e19 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c | |||
@@ -61,9 +61,11 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring) | |||
61 | } | 61 | } |
62 | 62 | ||
63 | static void si_dma_ring_emit_ib(struct amdgpu_ring *ring, | 63 | static void si_dma_ring_emit_ib(struct amdgpu_ring *ring, |
64 | struct amdgpu_job *job, | ||
64 | struct amdgpu_ib *ib, | 65 | struct amdgpu_ib *ib, |
65 | unsigned vmid, bool ctx_switch) | 66 | bool ctx_switch) |
66 | { | 67 | { |
68 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
67 | /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. | 69 | /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. |
68 | * Pad as necessary with NOPs. | 70 | * Pad as necessary with NOPs. |
69 | */ | 71 | */ |
@@ -122,7 +124,7 @@ static void si_dma_stop(struct amdgpu_device *adev) | |||
122 | 124 | ||
123 | if (adev->mman.buffer_funcs_ring == ring) | 125 | if (adev->mman.buffer_funcs_ring == ring) |
124 | amdgpu_ttm_set_buffer_funcs_status(adev, false); | 126 | amdgpu_ttm_set_buffer_funcs_status(adev, false); |
125 | ring->ready = false; | 127 | ring->sched.ready = false; |
126 | } | 128 | } |
127 | } | 129 | } |
128 | 130 | ||
@@ -175,13 +177,11 @@ static int si_dma_start(struct amdgpu_device *adev) | |||
175 | WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2); | 177 | WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2); |
176 | WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE); | 178 | WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE); |
177 | 179 | ||
178 | ring->ready = true; | 180 | ring->sched.ready = true; |
179 | 181 | ||
180 | r = amdgpu_ring_test_ring(ring); | 182 | r = amdgpu_ring_test_helper(ring); |
181 | if (r) { | 183 | if (r) |
182 | ring->ready = false; | ||
183 | return r; | 184 | return r; |
184 | } | ||
185 | 185 | ||
186 | if (adev->mman.buffer_funcs_ring == ring) | 186 | if (adev->mman.buffer_funcs_ring == ring) |
187 | amdgpu_ttm_set_buffer_funcs_status(adev, true); | 187 | amdgpu_ttm_set_buffer_funcs_status(adev, true); |
@@ -209,21 +209,16 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring) | |||
209 | u64 gpu_addr; | 209 | u64 gpu_addr; |
210 | 210 | ||
211 | r = amdgpu_device_wb_get(adev, &index); | 211 | r = amdgpu_device_wb_get(adev, &index); |
212 | if (r) { | 212 | if (r) |
213 | dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); | ||
214 | return r; | 213 | return r; |
215 | } | ||
216 | 214 | ||
217 | gpu_addr = adev->wb.gpu_addr + (index * 4); | 215 | gpu_addr = adev->wb.gpu_addr + (index * 4); |
218 | tmp = 0xCAFEDEAD; | 216 | tmp = 0xCAFEDEAD; |
219 | adev->wb.wb[index] = cpu_to_le32(tmp); | 217 | adev->wb.wb[index] = cpu_to_le32(tmp); |
220 | 218 | ||
221 | r = amdgpu_ring_alloc(ring, 4); | 219 | r = amdgpu_ring_alloc(ring, 4); |
222 | if (r) { | 220 | if (r) |
223 | DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); | 221 | goto error_free_wb; |
224 | amdgpu_device_wb_free(adev, index); | ||
225 | return r; | ||
226 | } | ||
227 | 222 | ||
228 | amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1)); | 223 | amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1)); |
229 | amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); | 224 | amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); |
@@ -238,15 +233,11 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring) | |||
238 | DRM_UDELAY(1); | 233 | DRM_UDELAY(1); |
239 | } | 234 | } |
240 | 235 | ||
241 | if (i < adev->usec_timeout) { | 236 | if (i >= adev->usec_timeout) |
242 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i); | 237 | r = -ETIMEDOUT; |
243 | } else { | ||
244 | DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", | ||
245 | ring->idx, tmp); | ||
246 | r = -EINVAL; | ||
247 | } | ||
248 | amdgpu_device_wb_free(adev, index); | ||
249 | 238 | ||
239 | error_free_wb: | ||
240 | amdgpu_device_wb_free(adev, index); | ||
250 | return r; | 241 | return r; |
251 | } | 242 | } |
252 | 243 | ||
@@ -269,20 +260,16 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
269 | long r; | 260 | long r; |
270 | 261 | ||
271 | r = amdgpu_device_wb_get(adev, &index); | 262 | r = amdgpu_device_wb_get(adev, &index); |
272 | if (r) { | 263 | if (r) |
273 | dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); | ||
274 | return r; | 264 | return r; |
275 | } | ||
276 | 265 | ||
277 | gpu_addr = adev->wb.gpu_addr + (index * 4); | 266 | gpu_addr = adev->wb.gpu_addr + (index * 4); |
278 | tmp = 0xCAFEDEAD; | 267 | tmp = 0xCAFEDEAD; |
279 | adev->wb.wb[index] = cpu_to_le32(tmp); | 268 | adev->wb.wb[index] = cpu_to_le32(tmp); |
280 | memset(&ib, 0, sizeof(ib)); | 269 | memset(&ib, 0, sizeof(ib)); |
281 | r = amdgpu_ib_get(adev, NULL, 256, &ib); | 270 | r = amdgpu_ib_get(adev, NULL, 256, &ib); |
282 | if (r) { | 271 | if (r) |
283 | DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); | ||
284 | goto err0; | 272 | goto err0; |
285 | } | ||
286 | 273 | ||
287 | ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1); | 274 | ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1); |
288 | ib.ptr[1] = lower_32_bits(gpu_addr); | 275 | ib.ptr[1] = lower_32_bits(gpu_addr); |
@@ -295,21 +282,16 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
295 | 282 | ||
296 | r = dma_fence_wait_timeout(f, false, timeout); | 283 | r = dma_fence_wait_timeout(f, false, timeout); |
297 | if (r == 0) { | 284 | if (r == 0) { |
298 | DRM_ERROR("amdgpu: IB test timed out\n"); | ||
299 | r = -ETIMEDOUT; | 285 | r = -ETIMEDOUT; |
300 | goto err1; | 286 | goto err1; |
301 | } else if (r < 0) { | 287 | } else if (r < 0) { |
302 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); | ||
303 | goto err1; | 288 | goto err1; |
304 | } | 289 | } |
305 | tmp = le32_to_cpu(adev->wb.wb[index]); | 290 | tmp = le32_to_cpu(adev->wb.wb[index]); |
306 | if (tmp == 0xDEADBEEF) { | 291 | if (tmp == 0xDEADBEEF) |
307 | DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); | ||
308 | r = 0; | 292 | r = 0; |
309 | } else { | 293 | else |
310 | DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); | ||
311 | r = -EINVAL; | 294 | r = -EINVAL; |
312 | } | ||
313 | 295 | ||
314 | err1: | 296 | err1: |
315 | amdgpu_ib_free(adev, &ib, NULL); | 297 | amdgpu_ib_free(adev, &ib, NULL); |
@@ -658,15 +640,6 @@ static int si_dma_process_trap_irq(struct amdgpu_device *adev, | |||
658 | return 0; | 640 | return 0; |
659 | } | 641 | } |
660 | 642 | ||
661 | static int si_dma_process_illegal_inst_irq(struct amdgpu_device *adev, | ||
662 | struct amdgpu_irq_src *source, | ||
663 | struct amdgpu_iv_entry *entry) | ||
664 | { | ||
665 | DRM_ERROR("Illegal instruction in SDMA command stream\n"); | ||
666 | schedule_work(&adev->reset_work); | ||
667 | return 0; | ||
668 | } | ||
669 | |||
670 | static int si_dma_set_clockgating_state(void *handle, | 643 | static int si_dma_set_clockgating_state(void *handle, |
671 | enum amd_clockgating_state state) | 644 | enum amd_clockgating_state state) |
672 | { | 645 | { |
@@ -781,15 +754,10 @@ static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = { | |||
781 | .process = si_dma_process_trap_irq, | 754 | .process = si_dma_process_trap_irq, |
782 | }; | 755 | }; |
783 | 756 | ||
784 | static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = { | ||
785 | .process = si_dma_process_illegal_inst_irq, | ||
786 | }; | ||
787 | |||
788 | static void si_dma_set_irq_funcs(struct amdgpu_device *adev) | 757 | static void si_dma_set_irq_funcs(struct amdgpu_device *adev) |
789 | { | 758 | { |
790 | adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; | 759 | adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; |
791 | adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs; | 760 | adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs; |
792 | adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs; | ||
793 | } | 761 | } |
794 | 762 | ||
795 | /** | 763 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h new file mode 100644 index 000000000000..ac2c27b7630c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h | |||
@@ -0,0 +1,130 @@ | |||
1 | /* | ||
2 | * Copyright 2018 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef _TA_XGMI_IF_H | ||
25 | #define _TA_XGMI_IF_H | ||
26 | |||
27 | /* Responses have bit 31 set */ | ||
28 | #define RSP_ID_MASK (1U << 31) | ||
29 | #define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK) | ||
30 | |||
31 | enum ta_command_xgmi { | ||
32 | TA_COMMAND_XGMI__INITIALIZE = 0x00, | ||
33 | TA_COMMAND_XGMI__GET_NODE_ID = 0x01, | ||
34 | TA_COMMAND_XGMI__GET_HIVE_ID = 0x02, | ||
35 | TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO = 0x03, | ||
36 | TA_COMMAND_XGMI__SET_TOPOLOGY_INFO = 0x04 | ||
37 | }; | ||
38 | |||
39 | /* XGMI related enumerations */ | ||
40 | /**********************************************************/ | ||
41 | enum ta_xgmi_connected_nodes { | ||
42 | TA_XGMI__MAX_CONNECTED_NODES = 64 | ||
43 | }; | ||
44 | |||
45 | enum ta_xgmi_status { | ||
46 | TA_XGMI_STATUS__SUCCESS = 0x00, | ||
47 | TA_XGMI_STATUS__GENERIC_FAILURE = 0x01, | ||
48 | TA_XGMI_STATUS__NULL_POINTER = 0x02, | ||
49 | TA_XGMI_STATUS__INVALID_PARAMETER = 0x03, | ||
50 | TA_XGMI_STATUS__NOT_INITIALIZED = 0x04, | ||
51 | TA_XGMI_STATUS__INVALID_NODE_NUM = 0x05, | ||
52 | TA_XGMI_STATUS__INVALID_NODE_ID = 0x06, | ||
53 | TA_XGMI_STATUS__INVALID_TOPOLOGY = 0x07, | ||
54 | TA_XGMI_STATUS__FAILED_ID_GEN = 0x08, | ||
55 | TA_XGMI_STATUS__FAILED_TOPOLOGY_INIT = 0x09, | ||
56 | TA_XGMI_STATUS__SET_SHARING_ERROR = 0x0A | ||
57 | }; | ||
58 | |||
59 | enum ta_xgmi_assigned_sdma_engine { | ||
60 | TA_XGMI_ASSIGNED_SDMA_ENGINE__NOT_ASSIGNED = -1, | ||
61 | TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA0 = 0, | ||
62 | TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA1 = 1, | ||
63 | TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA2 = 2, | ||
64 | TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA3 = 3, | ||
65 | TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA4 = 4, | ||
66 | TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA5 = 5 | ||
67 | }; | ||
68 | |||
69 | /* input/output structures for XGMI commands */ | ||
70 | /**********************************************************/ | ||
71 | struct ta_xgmi_node_info { | ||
72 | uint64_t node_id; | ||
73 | uint8_t num_hops; | ||
74 | uint8_t is_sharing_enabled; | ||
75 | enum ta_xgmi_assigned_sdma_engine sdma_engine; | ||
76 | }; | ||
77 | |||
78 | struct ta_xgmi_cmd_initialize_output { | ||
79 | uint32_t status; | ||
80 | }; | ||
81 | |||
82 | struct ta_xgmi_cmd_get_node_id_output { | ||
83 | uint64_t node_id; | ||
84 | }; | ||
85 | |||
86 | struct ta_xgmi_cmd_get_hive_id_output { | ||
87 | uint64_t hive_id; | ||
88 | }; | ||
89 | |||
90 | struct ta_xgmi_cmd_get_topology_info_input { | ||
91 | uint32_t num_nodes; | ||
92 | struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES]; | ||
93 | }; | ||
94 | |||
95 | struct ta_xgmi_cmd_get_topology_info_output { | ||
96 | uint32_t num_nodes; | ||
97 | struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES]; | ||
98 | }; | ||
99 | |||
100 | struct ta_xgmi_cmd_set_topology_info_input { | ||
101 | uint32_t num_nodes; | ||
102 | struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES]; | ||
103 | }; | ||
104 | |||
105 | /**********************************************************/ | ||
106 | /* Common input structure for XGMI callbacks */ | ||
107 | union ta_xgmi_cmd_input { | ||
108 | struct ta_xgmi_cmd_get_topology_info_input get_topology_info; | ||
109 | struct ta_xgmi_cmd_set_topology_info_input set_topology_info; | ||
110 | }; | ||
111 | |||
112 | /* Common output structure for XGMI callbacks */ | ||
113 | union ta_xgmi_cmd_output { | ||
114 | struct ta_xgmi_cmd_initialize_output initialize; | ||
115 | struct ta_xgmi_cmd_get_node_id_output get_node_id; | ||
116 | struct ta_xgmi_cmd_get_hive_id_output get_hive_id; | ||
117 | struct ta_xgmi_cmd_get_topology_info_output get_topology_info; | ||
118 | }; | ||
119 | /**********************************************************/ | ||
120 | |||
121 | struct ta_xgmi_shared_memory { | ||
122 | uint32_t cmd_id; | ||
123 | uint32_t resp_id; | ||
124 | enum ta_xgmi_status xgmi_status; | ||
125 | uint32_t reserved; | ||
126 | union ta_xgmi_cmd_input xgmi_in_message; | ||
127 | union ta_xgmi_cmd_output xgmi_out_message; | ||
128 | }; | ||
129 | |||
130 | #endif //_TA_XGMI_IF_H | ||
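[Editor's note] To make the new TA interface concrete: the driver fills a ta_xgmi_shared_memory block, invokes the XGMI TA, then matches resp_id against RSP_ID(cmd_id) and checks xgmi_status. A hypothetical usage sketch (psp_xgmi_invoke() stands in for the real PSP round trip, which is not part of this header):

    static int xgmi_get_hive_id_example(struct ta_xgmi_shared_memory *cmd,
                                        uint64_t *hive_id)
    {
            memset(cmd, 0, sizeof(*cmd));
            cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;

            psp_xgmi_invoke(cmd);   /* placeholder for the PSP/TA call */

            /* responses echo the command id with bit 31 set */
            if (cmd->resp_id != RSP_ID(TA_COMMAND_XGMI__GET_HIVE_ID) ||
                cmd->xgmi_status != TA_XGMI_STATUS__SUCCESS)
                    return -EIO;

            *hive_id = cmd->xgmi_out_message.get_hive_id.hive_id;
            return 0;
    }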
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 1fc17bf39fed..90bbcee00f28 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | |||
@@ -162,12 +162,9 @@ static int uvd_v4_2_hw_init(void *handle) | |||
162 | uvd_v4_2_enable_mgcg(adev, true); | 162 | uvd_v4_2_enable_mgcg(adev, true); |
163 | amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); | 163 | amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); |
164 | 164 | ||
165 | ring->ready = true; | 165 | r = amdgpu_ring_test_helper(ring); |
166 | r = amdgpu_ring_test_ring(ring); | 166 | if (r) |
167 | if (r) { | ||
168 | ring->ready = false; | ||
169 | goto done; | 167 | goto done; |
170 | } | ||
171 | 168 | ||
172 | r = amdgpu_ring_alloc(ring, 10); | 169 | r = amdgpu_ring_alloc(ring, 10); |
173 | if (r) { | 170 | if (r) { |
@@ -218,7 +215,7 @@ static int uvd_v4_2_hw_fini(void *handle) | |||
218 | if (RREG32(mmUVD_STATUS) != 0) | 215 | if (RREG32(mmUVD_STATUS) != 0) |
219 | uvd_v4_2_stop(adev); | 216 | uvd_v4_2_stop(adev); |
220 | 217 | ||
221 | ring->ready = false; | 218 | ring->sched.ready = false; |
222 | 219 | ||
223 | return 0; | 220 | return 0; |
224 | } | 221 | } |
@@ -484,11 +481,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) | |||
484 | 481 | ||
485 | WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); | 482 | WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); |
486 | r = amdgpu_ring_alloc(ring, 3); | 483 | r = amdgpu_ring_alloc(ring, 3); |
487 | if (r) { | 484 | if (r) |
488 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", | ||
489 | ring->idx, r); | ||
490 | return r; | 485 | return r; |
491 | } | 486 | |
492 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); | 487 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); |
493 | amdgpu_ring_write(ring, 0xDEADBEEF); | 488 | amdgpu_ring_write(ring, 0xDEADBEEF); |
494 | amdgpu_ring_commit(ring); | 489 | amdgpu_ring_commit(ring); |
@@ -499,14 +494,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) | |||
499 | DRM_UDELAY(1); | 494 | DRM_UDELAY(1); |
500 | } | 495 | } |
501 | 496 | ||
502 | if (i < adev->usec_timeout) { | 497 | if (i >= adev->usec_timeout) |
503 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", | 498 | r = -ETIMEDOUT; |
504 | ring->idx, i); | 499 | |
505 | } else { | ||
506 | DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", | ||
507 | ring->idx, tmp); | ||
508 | r = -EINVAL; | ||
509 | } | ||
510 | return r; | 500 | return r; |
511 | } | 501 | } |
512 | 502 | ||
@@ -519,8 +509,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) | |||
519 | * Write ring commands to execute the indirect buffer | 509 | * Write ring commands to execute the indirect buffer |
520 | */ | 510 | */ |
521 | static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, | 511 | static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, |
512 | struct amdgpu_job *job, | ||
522 | struct amdgpu_ib *ib, | 513 | struct amdgpu_ib *ib, |
523 | unsigned vmid, bool ctx_switch) | 514 | bool ctx_switch) |
524 | { | 515 | { |
525 | amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0)); | 516 | amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0)); |
526 | amdgpu_ring_write(ring, ib->gpu_addr); | 517 | amdgpu_ring_write(ring, ib->gpu_addr); |
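Across the uvd/vce/vcn hw_init paths in this patch, the open-coded sequence (set ring->ready, run amdgpu_ring_test_ring(), clear the flag on failure) collapses into amdgpu_ring_test_helper(), with the ready bit itself relocated to ring->sched.ready. A minimal sketch of what such a helper must do, inferred purely from the converted call sites; the real body lives in amdgpu_ring.c and may log more:

	/* Sketch inferred from the call sites in this patch, not the
	 * actual implementation: run the ring test and derive the GPU
	 * scheduler's ready state from the result. */
	int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
	{
		int r;

		r = amdgpu_ring_test_ring(ring);
		/* a failing ring must not be fed by the scheduler */
		ring->sched.ready = !r;

		return r;
	}
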
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index fde6ad5ac9ab..1c5e12703103 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | |||
@@ -158,12 +158,9 @@ static int uvd_v5_0_hw_init(void *handle) | |||
158 | uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); | 158 | uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); |
159 | uvd_v5_0_enable_mgcg(adev, true); | 159 | uvd_v5_0_enable_mgcg(adev, true); |
160 | 160 | ||
161 | ring->ready = true; | 161 | r = amdgpu_ring_test_helper(ring); |
162 | r = amdgpu_ring_test_ring(ring); | 162 | if (r) |
163 | if (r) { | ||
164 | ring->ready = false; | ||
165 | goto done; | 163 | goto done; |
166 | } | ||
167 | 164 | ||
168 | r = amdgpu_ring_alloc(ring, 10); | 165 | r = amdgpu_ring_alloc(ring, 10); |
169 | if (r) { | 166 | if (r) { |
@@ -215,7 +212,7 @@ static int uvd_v5_0_hw_fini(void *handle) | |||
215 | if (RREG32(mmUVD_STATUS) != 0) | 212 | if (RREG32(mmUVD_STATUS) != 0) |
216 | uvd_v5_0_stop(adev); | 213 | uvd_v5_0_stop(adev); |
217 | 214 | ||
218 | ring->ready = false; | 215 | ring->sched.ready = false; |
219 | 216 | ||
220 | return 0; | 217 | return 0; |
221 | } | 218 | } |
@@ -500,11 +497,8 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) | |||
500 | 497 | ||
501 | WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); | 498 | WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); |
502 | r = amdgpu_ring_alloc(ring, 3); | 499 | r = amdgpu_ring_alloc(ring, 3); |
503 | if (r) { | 500 | if (r) |
504 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", | ||
505 | ring->idx, r); | ||
506 | return r; | 501 | return r; |
507 | } | ||
508 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); | 502 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); |
509 | amdgpu_ring_write(ring, 0xDEADBEEF); | 503 | amdgpu_ring_write(ring, 0xDEADBEEF); |
510 | amdgpu_ring_commit(ring); | 504 | amdgpu_ring_commit(ring); |
@@ -515,14 +509,9 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) | |||
515 | DRM_UDELAY(1); | 509 | DRM_UDELAY(1); |
516 | } | 510 | } |
517 | 511 | ||
518 | if (i < adev->usec_timeout) { | 512 | if (i >= adev->usec_timeout) |
519 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", | 513 | r = -ETIMEDOUT; |
520 | ring->idx, i); | 514 | |
521 | } else { | ||
522 | DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", | ||
523 | ring->idx, tmp); | ||
524 | r = -EINVAL; | ||
525 | } | ||
526 | return r; | 515 | return r; |
527 | } | 516 | } |
528 | 517 | ||
@@ -535,8 +524,9 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) | |||
535 | * Write ring commands to execute the indirect buffer | 524 | * Write ring commands to execute the indirect buffer |
536 | */ | 525 | */ |
537 | static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, | 526 | static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, |
527 | struct amdgpu_job *job, | ||
538 | struct amdgpu_ib *ib, | 528 | struct amdgpu_ib *ib, |
539 | unsigned vmid, bool ctx_switch) | 529 | bool ctx_switch) |
540 | { | 530 | { |
541 | amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); | 531 | amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); |
542 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); | 532 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 7a5b40275e8e..f184842ef2a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | |||
@@ -175,11 +175,8 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring) | |||
175 | int r; | 175 | int r; |
176 | 176 | ||
177 | r = amdgpu_ring_alloc(ring, 16); | 177 | r = amdgpu_ring_alloc(ring, 16); |
178 | if (r) { | 178 | if (r) |
179 | DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n", | ||
180 | ring->idx, r); | ||
181 | return r; | 179 | return r; |
182 | } | ||
183 | amdgpu_ring_write(ring, HEVC_ENC_CMD_END); | 180 | amdgpu_ring_write(ring, HEVC_ENC_CMD_END); |
184 | amdgpu_ring_commit(ring); | 181 | amdgpu_ring_commit(ring); |
185 | 182 | ||
@@ -189,14 +186,8 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring) | |||
189 | DRM_UDELAY(1); | 186 | DRM_UDELAY(1); |
190 | } | 187 | } |
191 | 188 | ||
192 | if (i < adev->usec_timeout) { | 189 | if (i >= adev->usec_timeout) |
193 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", | ||
194 | ring->idx, i); | ||
195 | } else { | ||
196 | DRM_ERROR("amdgpu: ring %d test failed\n", | ||
197 | ring->idx); | ||
198 | r = -ETIMEDOUT; | 190 | r = -ETIMEDOUT; |
199 | } | ||
200 | 191 | ||
201 | return r; | 192 | return r; |
202 | } | 193 | } |
@@ -336,31 +327,24 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
336 | long r; | 327 | long r; |
337 | 328 | ||
338 | r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL); | 329 | r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL); |
339 | if (r) { | 330 | if (r) |
340 | DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); | ||
341 | goto error; | 331 | goto error; |
342 | } | ||
343 | 332 | ||
344 | r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence); | 333 | r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence); |
345 | if (r) { | 334 | if (r) |
346 | DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); | ||
347 | goto error; | 335 | goto error; |
348 | } | ||
349 | 336 | ||
350 | r = dma_fence_wait_timeout(fence, false, timeout); | 337 | r = dma_fence_wait_timeout(fence, false, timeout); |
351 | if (r == 0) { | 338 | if (r == 0) |
352 | DRM_ERROR("amdgpu: IB test timed out.\n"); | ||
353 | r = -ETIMEDOUT; | 339 | r = -ETIMEDOUT; |
354 | } else if (r < 0) { | 340 | else if (r > 0) |
355 | DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); | ||
356 | } else { | ||
357 | DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); | ||
358 | r = 0; | 341 | r = 0; |
359 | } | 342 | |
360 | error: | 343 | error: |
361 | dma_fence_put(fence); | 344 | dma_fence_put(fence); |
362 | return r; | 345 | return r; |
363 | } | 346 | } |
347 | |||
364 | static int uvd_v6_0_early_init(void *handle) | 348 | static int uvd_v6_0_early_init(void *handle) |
365 | { | 349 | { |
366 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 350 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
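The IB-test conversion above leans on the return convention of dma_fence_wait_timeout(): zero means the wait timed out, a positive value is the jiffies remaining on success, and a negative value is an error that can be returned unchanged. That is why the old "r < 0" error branch disappears and only the positive case needs normalizing. The pattern, spelled out:

	long r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)		/* not signaled within the timeout */
		r = -ETIMEDOUT;
	else if (r > 0)		/* signaled; discard remaining jiffies */
		r = 0;
	/* r < 0: propagate the wait error unchanged */
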
@@ -476,12 +460,9 @@ static int uvd_v6_0_hw_init(void *handle) | |||
476 | uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); | 460 | uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); |
477 | uvd_v6_0_enable_mgcg(adev, true); | 461 | uvd_v6_0_enable_mgcg(adev, true); |
478 | 462 | ||
479 | ring->ready = true; | 463 | r = amdgpu_ring_test_helper(ring); |
480 | r = amdgpu_ring_test_ring(ring); | 464 | if (r) |
481 | if (r) { | ||
482 | ring->ready = false; | ||
483 | goto done; | 465 | goto done; |
484 | } | ||
485 | 466 | ||
486 | r = amdgpu_ring_alloc(ring, 10); | 467 | r = amdgpu_ring_alloc(ring, 10); |
487 | if (r) { | 468 | if (r) { |
@@ -513,12 +494,9 @@ static int uvd_v6_0_hw_init(void *handle) | |||
513 | if (uvd_v6_0_enc_support(adev)) { | 494 | if (uvd_v6_0_enc_support(adev)) { |
514 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { | 495 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { |
515 | ring = &adev->uvd.inst->ring_enc[i]; | 496 | ring = &adev->uvd.inst->ring_enc[i]; |
516 | ring->ready = true; | 497 | r = amdgpu_ring_test_helper(ring); |
517 | r = amdgpu_ring_test_ring(ring); | 498 | if (r) |
518 | if (r) { | ||
519 | ring->ready = false; | ||
520 | goto done; | 499 | goto done; |
521 | } | ||
522 | } | 500 | } |
523 | } | 501 | } |
524 | 502 | ||
@@ -548,7 +526,7 @@ static int uvd_v6_0_hw_fini(void *handle) | |||
548 | if (RREG32(mmUVD_STATUS) != 0) | 526 | if (RREG32(mmUVD_STATUS) != 0) |
549 | uvd_v6_0_stop(adev); | 527 | uvd_v6_0_stop(adev); |
550 | 528 | ||
551 | ring->ready = false; | 529 | ring->sched.ready = false; |
552 | 530 | ||
553 | return 0; | 531 | return 0; |
554 | } | 532 | } |
@@ -969,11 +947,9 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) | |||
969 | 947 | ||
970 | WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); | 948 | WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); |
971 | r = amdgpu_ring_alloc(ring, 3); | 949 | r = amdgpu_ring_alloc(ring, 3); |
972 | if (r) { | 950 | if (r) |
973 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", | ||
974 | ring->idx, r); | ||
975 | return r; | 951 | return r; |
976 | } | 952 | |
977 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); | 953 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); |
978 | amdgpu_ring_write(ring, 0xDEADBEEF); | 954 | amdgpu_ring_write(ring, 0xDEADBEEF); |
979 | amdgpu_ring_commit(ring); | 955 | amdgpu_ring_commit(ring); |
@@ -984,14 +960,9 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) | |||
984 | DRM_UDELAY(1); | 960 | DRM_UDELAY(1); |
985 | } | 961 | } |
986 | 962 | ||
987 | if (i < adev->usec_timeout) { | 963 | if (i >= adev->usec_timeout) |
988 | DRM_DEBUG("ring test on %d succeeded in %d usecs\n", | 964 | r = -ETIMEDOUT; |
989 | ring->idx, i); | 965 | |
990 | } else { | ||
991 | DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", | ||
992 | ring->idx, tmp); | ||
993 | r = -EINVAL; | ||
994 | } | ||
995 | return r; | 966 | return r; |
996 | } | 967 | } |
997 | 968 | ||
@@ -1004,9 +975,12 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) | |||
1004 | * Write ring commands to execute the indirect buffer | 975 | * Write ring commands to execute the indirect buffer |
1005 | */ | 976 | */ |
1006 | static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, | 977 | static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, |
978 | struct amdgpu_job *job, | ||
1007 | struct amdgpu_ib *ib, | 979 | struct amdgpu_ib *ib, |
1008 | unsigned vmid, bool ctx_switch) | 980 | bool ctx_switch) |
1009 | { | 981 | { |
982 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
983 | |||
1010 | amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0)); | 984 | amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0)); |
1011 | amdgpu_ring_write(ring, vmid); | 985 | amdgpu_ring_write(ring, vmid); |
1012 | 986 | ||
@@ -1027,8 +1001,12 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, | |||
1027 | * Write enc ring commands to execute the indirect buffer | 1001 | * Write enc ring commands to execute the indirect buffer |
1028 | */ | 1002 | */ |
1029 | static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring, | 1003 | static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring, |
1030 | struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch) | 1004 | struct amdgpu_job *job, |
1005 | struct amdgpu_ib *ib, | ||
1006 | bool ctx_switch) | ||
1031 | { | 1007 | { |
1008 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
1009 | |||
1032 | amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM); | 1010 | amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM); |
1033 | amdgpu_ring_write(ring, vmid); | 1011 | amdgpu_ring_write(ring, vmid); |
1034 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); | 1012 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); |
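The emit_ib callbacks converted above gain the owning amdgpu_job and drop the explicit vmid parameter; each implementation recovers the VMID through AMDGPU_JOB_GET_VMID(). Presumably the macro only has to guard against a NULL job, since direct kernel-internal submissions carry no job and run in VMID 0; a sketch under that assumption:

	/* assumed shape of the helper (amdgpu_job.h): direct submissions
	 * pass job == NULL and therefore fall back to VMID 0 */
	#define AMDGPU_JOB_GET_VMID(job)	((job) ? (job)->vmid : 0)
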
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 58b39afcfb86..8a4595968d98 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | |||
@@ -183,11 +183,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) | |||
183 | return 0; | 183 | return 0; |
184 | 184 | ||
185 | r = amdgpu_ring_alloc(ring, 16); | 185 | r = amdgpu_ring_alloc(ring, 16); |
186 | if (r) { | 186 | if (r) |
187 | DRM_ERROR("amdgpu: uvd enc failed to lock (%d)ring %d (%d).\n", | ||
188 | ring->me, ring->idx, r); | ||
189 | return r; | 187 | return r; |
190 | } | ||
191 | amdgpu_ring_write(ring, HEVC_ENC_CMD_END); | 188 | amdgpu_ring_write(ring, HEVC_ENC_CMD_END); |
192 | amdgpu_ring_commit(ring); | 189 | amdgpu_ring_commit(ring); |
193 | 190 | ||
@@ -197,14 +194,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) | |||
197 | DRM_UDELAY(1); | 194 | DRM_UDELAY(1); |
198 | } | 195 | } |
199 | 196 | ||
200 | if (i < adev->usec_timeout) { | 197 | if (i >= adev->usec_timeout) |
201 | DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n", | ||
202 | ring->me, ring->idx, i); | ||
203 | } else { | ||
204 | DRM_ERROR("amdgpu: (%d)ring %d test failed\n", | ||
205 | ring->me, ring->idx); | ||
206 | r = -ETIMEDOUT; | 198 | r = -ETIMEDOUT; |
207 | } | ||
208 | 199 | ||
209 | return r; | 200 | return r; |
210 | } | 201 | } |
@@ -343,27 +334,19 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
343 | long r; | 334 | long r; |
344 | 335 | ||
345 | r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL); | 336 | r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL); |
346 | if (r) { | 337 | if (r) |
347 | DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ring->me, r); | ||
348 | goto error; | 338 | goto error; |
349 | } | ||
350 | 339 | ||
351 | r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence); | 340 | r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence); |
352 | if (r) { | 341 | if (r) |
353 | DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r); | ||
354 | goto error; | 342 | goto error; |
355 | } | ||
356 | 343 | ||
357 | r = dma_fence_wait_timeout(fence, false, timeout); | 344 | r = dma_fence_wait_timeout(fence, false, timeout); |
358 | if (r == 0) { | 345 | if (r == 0) |
359 | DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ring->me); | ||
360 | r = -ETIMEDOUT; | 346 | r = -ETIMEDOUT; |
361 | } else if (r < 0) { | 347 | else if (r > 0) |
362 | DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ring->me, r); | ||
363 | } else { | ||
364 | DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ring->me, ring->idx); | ||
365 | r = 0; | 348 | r = 0; |
366 | } | 349 | |
367 | error: | 350 | error: |
368 | dma_fence_put(fence); | 351 | dma_fence_put(fence); |
369 | return r; | 352 | return r; |
@@ -540,12 +523,9 @@ static int uvd_v7_0_hw_init(void *handle) | |||
540 | ring = &adev->uvd.inst[j].ring; | 523 | ring = &adev->uvd.inst[j].ring; |
541 | 524 | ||
542 | if (!amdgpu_sriov_vf(adev)) { | 525 | if (!amdgpu_sriov_vf(adev)) { |
543 | ring->ready = true; | 526 | r = amdgpu_ring_test_helper(ring); |
544 | r = amdgpu_ring_test_ring(ring); | 527 | if (r) |
545 | if (r) { | ||
546 | ring->ready = false; | ||
547 | goto done; | 528 | goto done; |
548 | } | ||
549 | 529 | ||
550 | r = amdgpu_ring_alloc(ring, 10); | 530 | r = amdgpu_ring_alloc(ring, 10); |
551 | if (r) { | 531 | if (r) { |
@@ -582,12 +562,9 @@ static int uvd_v7_0_hw_init(void *handle) | |||
582 | 562 | ||
583 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { | 563 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { |
584 | ring = &adev->uvd.inst[j].ring_enc[i]; | 564 | ring = &adev->uvd.inst[j].ring_enc[i]; |
585 | ring->ready = true; | 565 | r = amdgpu_ring_test_helper(ring); |
586 | r = amdgpu_ring_test_ring(ring); | 566 | if (r) |
587 | if (r) { | ||
588 | ring->ready = false; | ||
589 | goto done; | 567 | goto done; |
590 | } | ||
591 | } | 568 | } |
592 | } | 569 | } |
593 | done: | 570 | done: |
@@ -619,7 +596,7 @@ static int uvd_v7_0_hw_fini(void *handle) | |||
619 | for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { | 596 | for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { |
620 | if (adev->uvd.harvest_config & (1 << i)) | 597 | if (adev->uvd.harvest_config & (1 << i)) |
621 | continue; | 598 | continue; |
622 | adev->uvd.inst[i].ring.ready = false; | 599 | adev->uvd.inst[i].ring.sched.ready = false; |
623 | } | 600 | } |
624 | 601 | ||
625 | return 0; | 602 | return 0; |
@@ -1235,11 +1212,9 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) | |||
1235 | 1212 | ||
1236 | WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD); | 1213 | WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD); |
1237 | r = amdgpu_ring_alloc(ring, 3); | 1214 | r = amdgpu_ring_alloc(ring, 3); |
1238 | if (r) { | 1215 | if (r) |
1239 | DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n", | ||
1240 | ring->me, ring->idx, r); | ||
1241 | return r; | 1216 | return r; |
1242 | } | 1217 | |
1243 | amdgpu_ring_write(ring, | 1218 | amdgpu_ring_write(ring, |
1244 | PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0)); | 1219 | PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0)); |
1245 | amdgpu_ring_write(ring, 0xDEADBEEF); | 1220 | amdgpu_ring_write(ring, 0xDEADBEEF); |
@@ -1251,14 +1226,9 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) | |||
1251 | DRM_UDELAY(1); | 1226 | DRM_UDELAY(1); |
1252 | } | 1227 | } |
1253 | 1228 | ||
1254 | if (i < adev->usec_timeout) { | 1229 | if (i >= adev->usec_timeout) |
1255 | DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n", | 1230 | r = -ETIMEDOUT; |
1256 | ring->me, ring->idx, i); | 1231 | |
1257 | } else { | ||
1258 | DRM_ERROR("(%d)amdgpu: ring %d test failed (0x%08X)\n", | ||
1259 | ring->me, ring->idx, tmp); | ||
1260 | r = -EINVAL; | ||
1261 | } | ||
1262 | return r; | 1232 | return r; |
1263 | } | 1233 | } |
1264 | 1234 | ||
@@ -1300,10 +1270,12 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, | |||
1300 | * Write ring commands to execute the indirect buffer | 1270 | * Write ring commands to execute the indirect buffer |
1301 | */ | 1271 | */ |
1302 | static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring, | 1272 | static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring, |
1273 | struct amdgpu_job *job, | ||
1303 | struct amdgpu_ib *ib, | 1274 | struct amdgpu_ib *ib, |
1304 | unsigned vmid, bool ctx_switch) | 1275 | bool ctx_switch) |
1305 | { | 1276 | { |
1306 | struct amdgpu_device *adev = ring->adev; | 1277 | struct amdgpu_device *adev = ring->adev; |
1278 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
1307 | 1279 | ||
1308 | amdgpu_ring_write(ring, | 1280 | amdgpu_ring_write(ring, |
1309 | PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0)); | 1281 | PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0)); |
@@ -1329,8 +1301,12 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring, | |||
1329 | * Write enc ring commands to execute the indirect buffer | 1301 | * Write enc ring commands to execute the indirect buffer |
1330 | */ | 1302 | */ |
1331 | static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring, | 1303 | static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring, |
1332 | struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch) | 1304 | struct amdgpu_job *job, |
1305 | struct amdgpu_ib *ib, | ||
1306 | bool ctx_switch) | ||
1333 | { | 1307 | { |
1308 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
1309 | |||
1334 | amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM); | 1310 | amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM); |
1335 | amdgpu_ring_write(ring, vmid); | 1311 | amdgpu_ring_write(ring, vmid); |
1336 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); | 1312 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); |
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index ea28828360d3..bed78a778e3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | |||
@@ -463,15 +463,11 @@ static int vce_v2_0_hw_init(void *handle) | |||
463 | 463 | ||
464 | amdgpu_asic_set_vce_clocks(adev, 10000, 10000); | 464 | amdgpu_asic_set_vce_clocks(adev, 10000, 10000); |
465 | vce_v2_0_enable_mgcg(adev, true, false); | 465 | vce_v2_0_enable_mgcg(adev, true, false); |
466 | for (i = 0; i < adev->vce.num_rings; i++) | ||
467 | adev->vce.ring[i].ready = false; | ||
468 | 466 | ||
469 | for (i = 0; i < adev->vce.num_rings; i++) { | 467 | for (i = 0; i < adev->vce.num_rings; i++) { |
470 | r = amdgpu_ring_test_ring(&adev->vce.ring[i]); | 468 | r = amdgpu_ring_test_helper(&adev->vce.ring[i]); |
471 | if (r) | 469 | if (r) |
472 | return r; | 470 | return r; |
473 | else | ||
474 | adev->vce.ring[i].ready = true; | ||
475 | } | 471 | } |
476 | 472 | ||
477 | DRM_INFO("VCE initialized successfully.\n"); | 473 | DRM_INFO("VCE initialized successfully.\n"); |
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 6dbd39730070..3e84840859a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | |||
@@ -474,15 +474,10 @@ static int vce_v3_0_hw_init(void *handle) | |||
474 | 474 | ||
475 | amdgpu_asic_set_vce_clocks(adev, 10000, 10000); | 475 | amdgpu_asic_set_vce_clocks(adev, 10000, 10000); |
476 | 476 | ||
477 | for (i = 0; i < adev->vce.num_rings; i++) | ||
478 | adev->vce.ring[i].ready = false; | ||
479 | |||
480 | for (i = 0; i < adev->vce.num_rings; i++) { | 477 | for (i = 0; i < adev->vce.num_rings; i++) { |
481 | r = amdgpu_ring_test_ring(&adev->vce.ring[i]); | 478 | r = amdgpu_ring_test_helper(&adev->vce.ring[i]); |
482 | if (r) | 479 | if (r) |
483 | return r; | 480 | return r; |
484 | else | ||
485 | adev->vce.ring[i].ready = true; | ||
486 | } | 481 | } |
487 | 482 | ||
488 | DRM_INFO("VCE initialized successfully.\n"); | 483 | DRM_INFO("VCE initialized successfully.\n"); |
@@ -838,8 +833,12 @@ out: | |||
838 | } | 833 | } |
839 | 834 | ||
840 | static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring, | 835 | static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring, |
841 | struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch) | 836 | struct amdgpu_job *job, |
837 | struct amdgpu_ib *ib, | ||
838 | bool ctx_switch) | ||
842 | { | 839 | { |
840 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
841 | |||
843 | amdgpu_ring_write(ring, VCE_CMD_IB_VM); | 842 | amdgpu_ring_write(ring, VCE_CMD_IB_VM); |
844 | amdgpu_ring_write(ring, vmid); | 843 | amdgpu_ring_write(ring, vmid); |
845 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); | 844 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); |
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 1c9471890bf7..0054ba1b9a68 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | |||
@@ -519,15 +519,10 @@ static int vce_v4_0_hw_init(void *handle) | |||
519 | if (r) | 519 | if (r) |
520 | return r; | 520 | return r; |
521 | 521 | ||
522 | for (i = 0; i < adev->vce.num_rings; i++) | ||
523 | adev->vce.ring[i].ready = false; | ||
524 | |||
525 | for (i = 0; i < adev->vce.num_rings; i++) { | 522 | for (i = 0; i < adev->vce.num_rings; i++) { |
526 | r = amdgpu_ring_test_ring(&adev->vce.ring[i]); | 523 | r = amdgpu_ring_test_helper(&adev->vce.ring[i]); |
527 | if (r) | 524 | if (r) |
528 | return r; | 525 | return r; |
529 | else | ||
530 | adev->vce.ring[i].ready = true; | ||
531 | } | 526 | } |
532 | 527 | ||
533 | DRM_INFO("VCE initialized successfully.\n"); | 528 | DRM_INFO("VCE initialized successfully.\n"); |
@@ -549,7 +544,7 @@ static int vce_v4_0_hw_fini(void *handle) | |||
549 | } | 544 | } |
550 | 545 | ||
551 | for (i = 0; i < adev->vce.num_rings; i++) | 546 | for (i = 0; i < adev->vce.num_rings; i++) |
552 | adev->vce.ring[i].ready = false; | 547 | adev->vce.ring[i].sched.ready = false; |
553 | 548 | ||
554 | return 0; | 549 | return 0; |
555 | } | 550 | } |
@@ -951,9 +946,11 @@ static int vce_v4_0_set_powergating_state(void *handle, | |||
951 | } | 946 | } |
952 | #endif | 947 | #endif |
953 | 948 | ||
954 | static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, | 949 | static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, |
955 | struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch) | 950 | struct amdgpu_ib *ib, bool ctx_switch) |
956 | { | 951 | { |
952 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
953 | |||
957 | amdgpu_ring_write(ring, VCE_CMD_IB_VM); | 954 | amdgpu_ring_write(ring, VCE_CMD_IB_VM); |
958 | amdgpu_ring_write(ring, vmid); | 955 | amdgpu_ring_write(ring, vmid); |
959 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); | 956 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); |
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index eae90922fdbe..c1a03505f956 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | |||
@@ -176,30 +176,22 @@ static int vcn_v1_0_hw_init(void *handle) | |||
176 | struct amdgpu_ring *ring = &adev->vcn.ring_dec; | 176 | struct amdgpu_ring *ring = &adev->vcn.ring_dec; |
177 | int i, r; | 177 | int i, r; |
178 | 178 | ||
179 | ring->ready = true; | 179 | r = amdgpu_ring_test_helper(ring); |
180 | r = amdgpu_ring_test_ring(ring); | 180 | if (r) |
181 | if (r) { | ||
182 | ring->ready = false; | ||
183 | goto done; | 181 | goto done; |
184 | } | ||
185 | 182 | ||
186 | for (i = 0; i < adev->vcn.num_enc_rings; ++i) { | 183 | for (i = 0; i < adev->vcn.num_enc_rings; ++i) { |
187 | ring = &adev->vcn.ring_enc[i]; | 184 | ring = &adev->vcn.ring_enc[i]; |
188 | ring->ready = true; | 185 | ring->sched.ready = true; |
189 | r = amdgpu_ring_test_ring(ring); | 186 | r = amdgpu_ring_test_helper(ring); |
190 | if (r) { | 187 | if (r) |
191 | ring->ready = false; | ||
192 | goto done; | 188 | goto done; |
193 | } | ||
194 | } | 189 | } |
195 | 190 | ||
196 | ring = &adev->vcn.ring_jpeg; | 191 | ring = &adev->vcn.ring_jpeg; |
197 | ring->ready = true; | 192 | r = amdgpu_ring_test_helper(ring); |
198 | r = amdgpu_ring_test_ring(ring); | 193 | if (r) |
199 | if (r) { | ||
200 | ring->ready = false; | ||
201 | goto done; | 194 | goto done; |
202 | } | ||
203 | 195 | ||
204 | done: | 196 | done: |
205 | if (!r) | 197 | if (!r) |
@@ -224,7 +216,7 @@ static int vcn_v1_0_hw_fini(void *handle) | |||
224 | if (RREG32_SOC15(VCN, 0, mmUVD_STATUS)) | 216 | if (RREG32_SOC15(VCN, 0, mmUVD_STATUS)) |
225 | vcn_v1_0_stop(adev); | 217 | vcn_v1_0_stop(adev); |
226 | 218 | ||
227 | ring->ready = false; | 219 | ring->sched.ready = false; |
228 | 220 | ||
229 | return 0; | 221 | return 0; |
230 | } | 222 | } |
@@ -1366,10 +1358,12 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 | |||
1366 | * Write ring commands to execute the indirect buffer | 1358 | * Write ring commands to execute the indirect buffer |
1367 | */ | 1359 | */ |
1368 | static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring, | 1360 | static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring, |
1369 | struct amdgpu_ib *ib, | 1361 | struct amdgpu_job *job, |
1370 | unsigned vmid, bool ctx_switch) | 1362 | struct amdgpu_ib *ib, |
1363 | bool ctx_switch) | ||
1371 | { | 1364 | { |
1372 | struct amdgpu_device *adev = ring->adev; | 1365 | struct amdgpu_device *adev = ring->adev; |
1366 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
1373 | 1367 | ||
1374 | amdgpu_ring_write(ring, | 1368 | amdgpu_ring_write(ring, |
1375 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0)); | 1369 | PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0)); |
@@ -1524,8 +1518,12 @@ static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring) | |||
1524 | * Write enc ring commands to execute the indirect buffer | 1518 | * Write enc ring commands to execute the indirect buffer |
1525 | */ | 1519 | */ |
1526 | static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring, | 1520 | static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring, |
1527 | struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch) | 1521 | struct amdgpu_job *job, |
1522 | struct amdgpu_ib *ib, | ||
1523 | bool ctx_switch) | ||
1528 | { | 1524 | { |
1525 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
1526 | |||
1529 | amdgpu_ring_write(ring, VCN_ENC_CMD_IB); | 1527 | amdgpu_ring_write(ring, VCN_ENC_CMD_IB); |
1530 | amdgpu_ring_write(ring, vmid); | 1528 | amdgpu_ring_write(ring, vmid); |
1531 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); | 1529 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); |
@@ -1725,10 +1723,12 @@ static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u6 | |||
1725 | * Write ring commands to execute the indirect buffer. | 1723 | * Write ring commands to execute the indirect buffer. |
1726 | */ | 1724 | */ |
1727 | static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, | 1725 | static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, |
1728 | struct amdgpu_ib *ib, | 1726 | struct amdgpu_job *job, |
1729 | unsigned vmid, bool ctx_switch) | 1727 | struct amdgpu_ib *ib, |
1728 | bool ctx_switch) | ||
1730 | { | 1729 | { |
1731 | struct amdgpu_device *adev = ring->adev; | 1730 | struct amdgpu_device *adev = ring->adev; |
1731 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); | ||
1732 | 1732 | ||
1733 | amdgpu_ring_write(ring, | 1733 | amdgpu_ring_write(ring, |
1734 | PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0)); | 1734 | PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0)); |
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index a99f71797aa3..a0fda6f9252a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c | |||
@@ -129,7 +129,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) | |||
129 | else | 129 | else |
130 | wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); | 130 | wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); |
131 | WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); | 131 | WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); |
132 | WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF); | 132 | WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFFFF); |
133 | 133 | ||
134 | /* set rptr, wptr to 0 */ | 134 | /* set rptr, wptr to 0 */ |
135 | WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0); | 135 | WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0); |
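The one-line fix above widens the mask on the write-back address high word from 0xFF to 0xFFFF, so the IH ring's WPTR write-back address keeps 48 usable bits instead of 40. A worked example of what the old mask lost, with an address chosen purely for illustration:

	/*
	 * Example: wptr_off = 0x0001234567890000ULL, a write-back buffer
	 * placed above the 40-bit boundary.
	 *
	 *   upper_32_bits(wptr_off)          == 0x00012345
	 *   upper_32_bits(wptr_off) & 0xFF   == 0x45    (bits 40-47 lost)
	 *   upper_32_bits(wptr_off) & 0xFFFF == 0x2345  (intended)
	 */
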
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c index 2d4473557b0d..d13fc4fcb517 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | |||
@@ -49,6 +49,7 @@ int vega20_reg_base_init(struct amdgpu_device *adev) | |||
49 | adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); | 49 | adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); |
50 | adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); | 50 | adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); |
51 | adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); | 51 | adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); |
52 | adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i])); | ||
52 | } | 53 | } |
53 | return 0; | 54 | return 0; |
54 | } | 55 | } |
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c index 5d2475d5392c..177d1e5329a5 100644 --- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include "kfd_priv.h" | 23 | #include "kfd_priv.h" |
24 | #include "kfd_events.h" | 24 | #include "kfd_events.h" |
25 | #include "cik_int.h" | 25 | #include "cik_int.h" |
26 | #include "amdgpu_amdkfd.h" | ||
26 | 27 | ||
27 | static bool cik_event_interrupt_isr(struct kfd_dev *dev, | 28 | static bool cik_event_interrupt_isr(struct kfd_dev *dev, |
28 | const uint32_t *ih_ring_entry, | 29 | const uint32_t *ih_ring_entry, |
@@ -107,7 +108,7 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev, | |||
107 | kfd_process_vm_fault(dev->dqm, pasid); | 108 | kfd_process_vm_fault(dev->dqm, pasid); |
108 | 109 | ||
109 | memset(&info, 0, sizeof(info)); | 110 | memset(&info, 0, sizeof(info)); |
110 | dev->kfd2kgd->get_vm_fault_info(dev->kgd, &info); | 111 | amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->kgd, &info); |
111 | if (!info.page_addr && !info.status) | 112 | if (!info.page_addr && !info.status) |
112 | return; | 113 | return; |
113 | 114 | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/drivers/gpu/drm/amd/amdkfd/cik_regs.h index 37ce6dd65391..8e2a1663c4db 100644 --- a/drivers/gpu/drm/amd/amdkfd/cik_regs.h +++ b/drivers/gpu/drm/amd/amdkfd/cik_regs.h | |||
@@ -68,6 +68,4 @@ | |||
68 | 68 | ||
69 | #define GRBM_GFX_INDEX 0x30800 | 69 | #define GRBM_GFX_INDEX 0x30800 |
70 | 70 | ||
71 | #define ATC_VMID_PASID_MAPPING_VALID (1U << 31) | ||
72 | |||
73 | #endif | 71 | #endif |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 14d5b5fa822d..5f4062b41add 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include "kfd_priv.h" | 37 | #include "kfd_priv.h" |
38 | #include "kfd_device_queue_manager.h" | 38 | #include "kfd_device_queue_manager.h" |
39 | #include "kfd_dbgmgr.h" | 39 | #include "kfd_dbgmgr.h" |
40 | #include "amdgpu_amdkfd.h" | ||
40 | 41 | ||
41 | static long kfd_ioctl(struct file *, unsigned int, unsigned long); | 42 | static long kfd_ioctl(struct file *, unsigned int, unsigned long); |
42 | static int kfd_open(struct inode *, struct file *); | 43 | static int kfd_open(struct inode *, struct file *); |
@@ -834,8 +835,7 @@ static int kfd_ioctl_get_clock_counters(struct file *filep, | |||
834 | dev = kfd_device_by_id(args->gpu_id); | 835 | dev = kfd_device_by_id(args->gpu_id); |
835 | if (dev) | 836 | if (dev) |
836 | /* Reading GPU clock counter from KGD */ | 837 | /* Reading GPU clock counter from KGD */ |
837 | args->gpu_clock_counter = | 838 | args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->kgd); |
838 | dev->kfd2kgd->get_gpu_clock_counter(dev->kgd); | ||
839 | else | 839 | else |
840 | /* Node without GPU resource */ | 840 | /* Node without GPU resource */ |
841 | args->gpu_clock_counter = 0; | 841 | args->gpu_clock_counter = 0; |
@@ -1042,7 +1042,7 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p, | |||
1042 | } | 1042 | } |
1043 | mutex_unlock(&p->mutex); | 1043 | mutex_unlock(&p->mutex); |
1044 | 1044 | ||
1045 | err = kfd->kfd2kgd->map_gtt_bo_to_kernel(kfd->kgd, | 1045 | err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd, |
1046 | mem, &kern_addr, &size); | 1046 | mem, &kern_addr, &size); |
1047 | if (err) { | 1047 | if (err) { |
1048 | pr_err("Failed to map event page to kernel\n"); | 1048 | pr_err("Failed to map event page to kernel\n"); |
@@ -1240,7 +1240,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev) | |||
1240 | if (dev->device_info->needs_iommu_device) | 1240 | if (dev->device_info->needs_iommu_device) |
1241 | return false; | 1241 | return false; |
1242 | 1242 | ||
1243 | dev->kfd2kgd->get_local_mem_info(dev->kgd, &mem_info); | 1243 | amdgpu_amdkfd_get_local_mem_info(dev->kgd, &mem_info); |
1244 | if (mem_info.local_mem_size_private == 0 && | 1244 | if (mem_info.local_mem_size_private == 0 && |
1245 | mem_info.local_mem_size_public > 0) | 1245 | mem_info.local_mem_size_public > 0) |
1246 | return true; | 1246 | return true; |
@@ -1281,7 +1281,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, | |||
1281 | goto err_unlock; | 1281 | goto err_unlock; |
1282 | } | 1282 | } |
1283 | 1283 | ||
1284 | err = dev->kfd2kgd->alloc_memory_of_gpu( | 1284 | err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( |
1285 | dev->kgd, args->va_addr, args->size, | 1285 | dev->kgd, args->va_addr, args->size, |
1286 | pdd->vm, (struct kgd_mem **) &mem, &offset, | 1286 | pdd->vm, (struct kgd_mem **) &mem, &offset, |
1287 | flags); | 1287 | flags); |
@@ -1303,7 +1303,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, | |||
1303 | return 0; | 1303 | return 0; |
1304 | 1304 | ||
1305 | err_free: | 1305 | err_free: |
1306 | dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem); | 1306 | amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem); |
1307 | err_unlock: | 1307 | err_unlock: |
1308 | mutex_unlock(&p->mutex); | 1308 | mutex_unlock(&p->mutex); |
1309 | return err; | 1309 | return err; |
@@ -1338,7 +1338,8 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep, | |||
1338 | goto err_unlock; | 1338 | goto err_unlock; |
1339 | } | 1339 | } |
1340 | 1340 | ||
1341 | ret = dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem); | 1341 | ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, |
1342 | (struct kgd_mem *)mem); | ||
1342 | 1343 | ||
1343 | /* If freeing the buffer failed, leave the handle in place for | 1344 | /* If freeing the buffer failed, leave the handle in place for |
1344 | * clean-up during process tear-down. | 1345 | * clean-up during process tear-down. |
@@ -1418,7 +1419,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, | |||
1418 | err = PTR_ERR(peer_pdd); | 1419 | err = PTR_ERR(peer_pdd); |
1419 | goto get_mem_obj_from_handle_failed; | 1420 | goto get_mem_obj_from_handle_failed; |
1420 | } | 1421 | } |
1421 | err = peer->kfd2kgd->map_memory_to_gpu( | 1422 | err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu( |
1422 | peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm); | 1423 | peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm); |
1423 | if (err) { | 1424 | if (err) { |
1424 | pr_err("Failed to map to gpu %d/%d\n", | 1425 | pr_err("Failed to map to gpu %d/%d\n", |
@@ -1430,7 +1431,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, | |||
1430 | 1431 | ||
1431 | mutex_unlock(&p->mutex); | 1432 | mutex_unlock(&p->mutex); |
1432 | 1433 | ||
1433 | err = dev->kfd2kgd->sync_memory(dev->kgd, (struct kgd_mem *) mem, true); | 1434 | err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true); |
1434 | if (err) { | 1435 | if (err) { |
1435 | pr_debug("Sync memory failed, wait interrupted by user signal\n"); | 1436 | pr_debug("Sync memory failed, wait interrupted by user signal\n"); |
1436 | goto sync_memory_failed; | 1437 | goto sync_memory_failed; |
@@ -1525,7 +1526,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, | |||
1525 | err = -ENODEV; | 1526 | err = -ENODEV; |
1526 | goto get_mem_obj_from_handle_failed; | 1527 | goto get_mem_obj_from_handle_failed; |
1527 | } | 1528 | } |
1528 | err = dev->kfd2kgd->unmap_memory_to_gpu( | 1529 | err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( |
1529 | peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm); | 1530 | peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm); |
1530 | if (err) { | 1531 | if (err) { |
1531 | pr_err("Failed to unmap from gpu %d/%d\n", | 1532 | pr_err("Failed to unmap from gpu %d/%d\n", |
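The kfd_chardev.c hunks above, like the kfd_crat.c, kfd_device.c and kfd_device_queue_manager.c ones that follow, replace indirect calls through the dev->kfd2kgd function-pointer table with direct calls into exported amdgpu_amdkfd_* wrappers. The prototypes below are a guess at the shape this implies; the authoritative declarations live in amdgpu_amdkfd.h:

	/* hypothetical prototypes mirroring the call sites above; the
	 * kgd_dev handle continues to stand in for the amdgpu_device */
	int amdgpu_amdkfd_gpuvm_sync_memory(struct kgd_dev *kgd,
					    struct kgd_mem *mem, bool intr);
	int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct kgd_dev *kgd,
						  struct kgd_mem *mem,
						  void *vm);
	int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(struct kgd_dev *kgd,
						   struct kgd_mem *mem);
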
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 56412b0e7e1c..3783d122f283 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include "kfd_priv.h" | 26 | #include "kfd_priv.h" |
27 | #include "kfd_topology.h" | 27 | #include "kfd_topology.h" |
28 | #include "kfd_iommu.h" | 28 | #include "kfd_iommu.h" |
29 | #include "amdgpu_amdkfd.h" | ||
29 | 30 | ||
30 | /* GPU Processor ID base for dGPUs for which VCRAT needs to be created. | 31 | /* GPU Processor ID base for dGPUs for which VCRAT needs to be created. |
31 | * GPU processor ID are expressed with Bit[31]=1. | 32 | * GPU processor ID are expressed with Bit[31]=1. |
@@ -753,12 +754,10 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size) | |||
753 | return -ENODATA; | 754 | return -ENODATA; |
754 | } | 755 | } |
755 | 756 | ||
756 | pcrat_image = kmalloc(crat_table->length, GFP_KERNEL); | 757 | pcrat_image = kmemdup(crat_table, crat_table->length, GFP_KERNEL); |
757 | if (!pcrat_image) | 758 | if (!pcrat_image) |
758 | return -ENOMEM; | 759 | return -ENOMEM; |
759 | 760 | ||
760 | memcpy(pcrat_image, crat_table, crat_table->length); | ||
761 | |||
762 | *crat_image = pcrat_image; | 761 | *crat_image = pcrat_image; |
763 | *size = crat_table->length; | 762 | *size = crat_table->length; |
764 | 763 | ||
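The hunk above folds the kmalloc()-plus-memcpy() pair into kmemdup(), which allocates len bytes and copies from src in one call, returning NULL on allocation failure just as kmalloc() would, so the error path is unchanged:

	void *kmemdup(const void *src, size_t len, gfp_t gfp);
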
@@ -1161,7 +1160,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, | |||
1161 | cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT; | 1160 | cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT; |
1162 | cu->proximity_domain = proximity_domain; | 1161 | cu->proximity_domain = proximity_domain; |
1163 | 1162 | ||
1164 | kdev->kfd2kgd->get_cu_info(kdev->kgd, &cu_info); | 1163 | amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info); |
1165 | cu->num_simd_per_cu = cu_info.simd_per_cu; | 1164 | cu->num_simd_per_cu = cu_info.simd_per_cu; |
1166 | cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number; | 1165 | cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number; |
1167 | cu->max_waves_simd = cu_info.max_waves_per_simd; | 1166 | cu->max_waves_simd = cu_info.max_waves_per_simd; |
@@ -1192,7 +1191,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, | |||
1192 | * report the total FB size (public+private) as a single | 1191 | * report the total FB size (public+private) as a single |
1193 | * private heap. | 1192 | * private heap. |
1194 | */ | 1193 | */ |
1195 | kdev->kfd2kgd->get_local_mem_info(kdev->kgd, &local_mem_info); | 1194 | amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info); |
1196 | sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + | 1195 | sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + |
1197 | sub_type_hdr->length); | 1196 | sub_type_hdr->length); |
1198 | 1197 | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index a9f18ea7e354..c004647c8cb4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include "kfd_pm4_headers_vi.h" | 28 | #include "kfd_pm4_headers_vi.h" |
29 | #include "cwsr_trap_handler.h" | 29 | #include "cwsr_trap_handler.h" |
30 | #include "kfd_iommu.h" | 30 | #include "kfd_iommu.h" |
31 | #include "amdgpu_amdkfd.h" | ||
31 | 32 | ||
32 | #define MQD_SIZE_ALIGNED 768 | 33 | #define MQD_SIZE_ALIGNED 768 |
33 | 34 | ||
@@ -478,7 +479,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, | |||
478 | /* add another 512KB for all other allocations on gart (HPD, fences) */ | 479 | /* add another 512KB for all other allocations on gart (HPD, fences) */ |
479 | size += 512 * 1024; | 480 | size += 512 * 1024; |
480 | 481 | ||
481 | if (kfd->kfd2kgd->init_gtt_mem_allocation( | 482 | if (amdgpu_amdkfd_alloc_gtt_mem( |
482 | kfd->kgd, size, &kfd->gtt_mem, | 483 | kfd->kgd, size, &kfd->gtt_mem, |
483 | &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr, | 484 | &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr, |
484 | false)) { | 485 | false)) { |
@@ -552,7 +553,7 @@ kfd_topology_add_device_error: | |||
552 | kfd_doorbell_error: | 553 | kfd_doorbell_error: |
553 | kfd_gtt_sa_fini(kfd); | 554 | kfd_gtt_sa_fini(kfd); |
554 | kfd_gtt_sa_init_error: | 555 | kfd_gtt_sa_init_error: |
555 | kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem); | 556 | amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem); |
556 | dev_err(kfd_device, | 557 | dev_err(kfd_device, |
557 | "device %x:%x NOT added due to errors\n", | 558 | "device %x:%x NOT added due to errors\n", |
558 | kfd->pdev->vendor, kfd->pdev->device); | 559 | kfd->pdev->vendor, kfd->pdev->device); |
@@ -569,7 +570,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd) | |||
569 | kfd_topology_remove_device(kfd); | 570 | kfd_topology_remove_device(kfd); |
570 | kfd_doorbell_fini(kfd); | 571 | kfd_doorbell_fini(kfd); |
571 | kfd_gtt_sa_fini(kfd); | 572 | kfd_gtt_sa_fini(kfd); |
572 | kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem); | 573 | amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem); |
573 | } | 574 | } |
574 | 575 | ||
575 | kfree(kfd); | 576 | kfree(kfd); |
@@ -681,6 +682,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) | |||
681 | { | 682 | { |
682 | uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE]; | 683 | uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE]; |
683 | bool is_patched = false; | 684 | bool is_patched = false; |
685 | unsigned long flags; | ||
684 | 686 | ||
685 | if (!kfd->init_complete) | 687 | if (!kfd->init_complete) |
686 | return; | 688 | return; |
@@ -690,7 +692,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) | |||
690 | return; | 692 | return; |
691 | } | 693 | } |
692 | 694 | ||
693 | spin_lock(&kfd->interrupt_lock); | 695 | spin_lock_irqsave(&kfd->interrupt_lock, flags); |
694 | 696 | ||
695 | if (kfd->interrupts_active | 697 | if (kfd->interrupts_active |
696 | && interrupt_is_wanted(kfd, ih_ring_entry, | 698 | && interrupt_is_wanted(kfd, ih_ring_entry, |
@@ -699,7 +701,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) | |||
699 | is_patched ? patched_ihre : ih_ring_entry)) | 701 | is_patched ? patched_ihre : ih_ring_entry)) |
700 | queue_work(kfd->ih_wq, &kfd->interrupt_work); | 702 | queue_work(kfd->ih_wq, &kfd->interrupt_work); |
701 | 703 | ||
702 | spin_unlock(&kfd->interrupt_lock); | 704 | spin_unlock_irqrestore(&kfd->interrupt_lock, flags); |
703 | } | 705 | } |
704 | 706 | ||
705 | int kgd2kfd_quiesce_mm(struct mm_struct *mm) | 707 | int kgd2kfd_quiesce_mm(struct mm_struct *mm) |
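The interrupt hunk above upgrades spin_lock() to spin_lock_irqsave(): kgd2kfd_interrupt() can now be entered with local interrupts in any state, because the irqsave variant disables them for the critical section and irqrestore puts back exactly the state saved in flags. The canonical pattern:

	unsigned long flags;

	spin_lock_irqsave(&kfd->interrupt_lock, flags);
	/* critical section, now safe against an interrupt handler
	 * taking the same lock on this CPU */
	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
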
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index a3b933967171..fb9d66ea13b7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include "kfd_mqd_manager.h" | 33 | #include "kfd_mqd_manager.h" |
34 | #include "cik_regs.h" | 34 | #include "cik_regs.h" |
35 | #include "kfd_kernel_queue.h" | 35 | #include "kfd_kernel_queue.h" |
36 | #include "amdgpu_amdkfd.h" | ||
36 | 37 | ||
37 | /* Size of the per-pipe EOP queue */ | 38 | /* Size of the per-pipe EOP queue */ |
38 | #define CIK_HPD_EOP_BYTES_LOG2 11 | 39 | #define CIK_HPD_EOP_BYTES_LOG2 11 |
@@ -219,7 +220,7 @@ static int flush_texture_cache_nocpsch(struct kfd_dev *kdev, | |||
219 | if (ret) | 220 | if (ret) |
220 | return ret; | 221 | return ret; |
221 | 222 | ||
222 | return kdev->kfd2kgd->submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid, | 223 | return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid, |
223 | qpd->ib_base, (uint32_t *)qpd->ib_kaddr, | 224 | qpd->ib_base, (uint32_t *)qpd->ib_kaddr, |
224 | pmf->release_mem_size / sizeof(uint32_t)); | 225 | pmf->release_mem_size / sizeof(uint32_t)); |
225 | } | 226 | } |
@@ -672,7 +673,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, | |||
672 | 673 | ||
673 | pdd = qpd_to_pdd(qpd); | 674 | pdd = qpd_to_pdd(qpd); |
674 | /* Retrieve PD base */ | 675 | /* Retrieve PD base */ |
675 | pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm); | 676 | pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm); |
676 | 677 | ||
677 | dqm_lock(dqm); | 678 | dqm_lock(dqm); |
678 | if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */ | 679 | if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */ |
@@ -743,7 +744,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, | |||
743 | 744 | ||
744 | pdd = qpd_to_pdd(qpd); | 745 | pdd = qpd_to_pdd(qpd); |
745 | /* Retrieve PD base */ | 746 | /* Retrieve PD base */ |
746 | pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm); | 747 | pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm); |
747 | 748 | ||
748 | dqm_lock(dqm); | 749 | dqm_lock(dqm); |
749 | if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */ | 750 | if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */ |
@@ -793,7 +794,7 @@ static int register_process(struct device_queue_manager *dqm, | |||
793 | 794 | ||
794 | pdd = qpd_to_pdd(qpd); | 795 | pdd = qpd_to_pdd(qpd); |
795 | /* Retrieve PD base */ | 796 | /* Retrieve PD base */ |
796 | pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm); | 797 | pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm); |
797 | 798 | ||
798 | dqm_lock(dqm); | 799 | dqm_lock(dqm); |
799 | list_add(&n->list, &dqm->queues); | 800 | list_add(&n->list, &dqm->queues); |
@@ -805,7 +806,7 @@ static int register_process(struct device_queue_manager *dqm, | |||
805 | retval = dqm->asic_ops.update_qpd(dqm, qpd); | 806 | retval = dqm->asic_ops.update_qpd(dqm, qpd); |
806 | 807 | ||
807 | if (dqm->processes_count++ == 0) | 808 | if (dqm->processes_count++ == 0) |
808 | dqm->dev->kfd2kgd->set_compute_idle(dqm->dev->kgd, false); | 809 | amdgpu_amdkfd_set_compute_idle(dqm->dev->kgd, false); |
809 | 810 | ||
810 | dqm_unlock(dqm); | 811 | dqm_unlock(dqm); |
811 | 812 | ||
@@ -829,7 +830,7 @@ static int unregister_process(struct device_queue_manager *dqm, | |||
829 | list_del(&cur->list); | 830 | list_del(&cur->list); |
830 | kfree(cur); | 831 | kfree(cur); |
831 | if (--dqm->processes_count == 0) | 832 | if (--dqm->processes_count == 0) |
832 | dqm->dev->kfd2kgd->set_compute_idle( | 833 | amdgpu_amdkfd_set_compute_idle( |
833 | dqm->dev->kgd, true); | 834 | dqm->dev->kgd, true); |
834 | goto out; | 835 | goto out; |
835 | } | 836 | } |
@@ -845,15 +846,8 @@ static int | |||
845 | set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid, | 846 | set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid, |
846 | unsigned int vmid) | 847 | unsigned int vmid) |
847 | { | 848 | { |
848 | uint32_t pasid_mapping; | ||
849 | |||
850 | pasid_mapping = (pasid == 0) ? 0 : | ||
851 | (uint32_t)pasid | | ||
852 | ATC_VMID_PASID_MAPPING_VALID; | ||
853 | |||
854 | return dqm->dev->kfd2kgd->set_pasid_vmid_mapping( | 849 | return dqm->dev->kfd2kgd->set_pasid_vmid_mapping( |
855 | dqm->dev->kgd, pasid_mapping, | 850 | dqm->dev->kgd, pasid, vmid); |
856 | vmid); | ||
857 | } | 851 | } |
858 | 852 | ||
859 | static void init_interrupts(struct device_queue_manager *dqm) | 853 | static void init_interrupts(struct device_queue_manager *dqm) |
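The hunk above strips the construction of the ATC mapping word out of the generic set_pasid_vmid_mapping() and hands the raw pasid straight to the per-ASIC kfd2kgd callback; presumably each callback now ORs in its own valid bit, which would also explain why ATC_VMID_PASID_MAPPING_VALID was removed from cik_regs.h earlier in this patch. A hypothetical callee-side equivalent of the removed lines:

	/* assumed to live in the per-ASIC callback from now on */
	uint32_t pasid_mapping = (pasid == 0) ? 0 :
		(uint32_t)pasid | ATC_VMID_PASID_MAPPING_VALID;
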
@@ -1796,7 +1790,7 @@ static void kfd_process_hw_exception(struct work_struct *work) | |||
1796 | { | 1790 | { |
1797 | struct device_queue_manager *dqm = container_of(work, | 1791 | struct device_queue_manager *dqm = container_of(work, |
1798 | struct device_queue_manager, hw_exception_work); | 1792 | struct device_queue_manager, hw_exception_work); |
1799 | dqm->dev->kfd2kgd->gpu_recover(dqm->dev->kgd); | 1793 | amdgpu_amdkfd_gpu_reset(dqm->dev->kgd); |
1800 | } | 1794 | } |
1801 | 1795 | ||
1802 | #if defined(CONFIG_DEBUG_FS) | 1796 | #if defined(CONFIG_DEBUG_FS) |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index e33019a7a883..6910028010d6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | |||
@@ -22,6 +22,7 @@ | |||
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include "kfd_mqd_manager.h" | 24 | #include "kfd_mqd_manager.h" |
25 | #include "amdgpu_amdkfd.h" | ||
25 | 26 | ||
26 | struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type, | 27 | struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type, |
27 | struct kfd_dev *dev) | 28 | struct kfd_dev *dev) |
@@ -58,7 +59,7 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm, | |||
58 | uint32_t cu_per_sh[4] = {0}; | 59 | uint32_t cu_per_sh[4] = {0}; |
59 | int i, se, cu = 0; | 60 | int i, se, cu = 0; |
60 | 61 | ||
61 | mm->dev->kfd2kgd->get_cu_info(mm->dev->kgd, &cu_info); | 62 | amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info); |
62 | 63 | ||
63 | if (cu_mask_count > cu_info.cu_active_number) | 64 | if (cu_mask_count > cu_info.cu_active_number) |
64 | cu_mask_count = cu_info.cu_active_number; | 65 | cu_mask_count = cu_info.cu_active_number; |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index f381c1cb27bd..9dbba609450e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include "gc/gc_9_0_offset.h" | 30 | #include "gc/gc_9_0_offset.h" |
31 | #include "gc/gc_9_0_sh_mask.h" | 31 | #include "gc/gc_9_0_sh_mask.h" |
32 | #include "sdma0/sdma0_4_0_sh_mask.h" | 32 | #include "sdma0/sdma0_4_0_sh_mask.h" |
33 | #include "amdgpu_amdkfd.h" | ||
33 | 34 | ||
34 | static inline struct v9_mqd *get_mqd(void *mqd) | 35 | static inline struct v9_mqd *get_mqd(void *mqd) |
35 | { | 36 | { |
@@ -83,7 +84,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd, | |||
83 | *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); | 84 | *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); |
84 | if (!*mqd_mem_obj) | 85 | if (!*mqd_mem_obj) |
85 | return -ENOMEM; | 86 | return -ENOMEM; |
86 | retval = kfd->kfd2kgd->init_gtt_mem_allocation(kfd->kgd, | 87 | retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd, |
87 | ALIGN(q->ctl_stack_size, PAGE_SIZE) + | 88 | ALIGN(q->ctl_stack_size, PAGE_SIZE) + |
88 | ALIGN(sizeof(struct v9_mqd), PAGE_SIZE), | 89 | ALIGN(sizeof(struct v9_mqd), PAGE_SIZE), |
89 | &((*mqd_mem_obj)->gtt_mem), | 90 | &((*mqd_mem_obj)->gtt_mem), |
@@ -250,7 +251,7 @@ static void uninit_mqd(struct mqd_manager *mm, void *mqd, | |||
250 | struct kfd_dev *kfd = mm->dev; | 251 | struct kfd_dev *kfd = mm->dev; |
251 | 252 | ||
252 | if (mqd_mem_obj->gtt_mem) { | 253 | if (mqd_mem_obj->gtt_mem) { |
253 | kfd->kfd2kgd->free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem); | 254 | amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem); |
254 | kfree(mqd_mem_obj); | 255 | kfree(mqd_mem_obj); |
255 | } else { | 256 | } else { |
256 | kfd_gtt_sa_free(mm->dev, mqd_mem_obj); | 257 | kfd_gtt_sa_free(mm->dev, mqd_mem_obj); |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c index 15fff4420e53..33b08ff00b50 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | |||
@@ -22,6 +22,7 @@ | |||
22 | 22 | ||
23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
24 | #include "kfd_priv.h" | 24 | #include "kfd_priv.h" |
25 | #include "amdgpu_ids.h" | ||
25 | 26 | ||
26 | static unsigned int pasid_bits = 16; | 27 | static unsigned int pasid_bits = 16; |
27 | static const struct kfd2kgd_calls *kfd2kgd; | 28 | static const struct kfd2kgd_calls *kfd2kgd; |
@@ -71,7 +72,7 @@ unsigned int kfd_pasid_alloc(void) | |||
71 | return false; | 72 | return false; |
72 | } | 73 | } |
73 | 74 | ||
74 | r = kfd2kgd->alloc_pasid(pasid_bits); | 75 | r = amdgpu_pasid_alloc(pasid_bits); |
75 | 76 | ||
76 | return r > 0 ? r : 0; | 77 | return r > 0 ? r : 0; |
77 | } | 78 | } |
@@ -79,5 +80,5 @@ unsigned int kfd_pasid_alloc(void) | |||
79 | void kfd_pasid_free(unsigned int pasid) | 80 | void kfd_pasid_free(unsigned int pasid) |
80 | { | 81 | { |
81 | if (kfd2kgd) | 82 | if (kfd2kgd) |
82 | kfd2kgd->free_pasid(pasid); | 83 | amdgpu_pasid_free(pasid); |
83 | } | 84 | } |
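For callers, the contract of these wrappers is unchanged: kfd_pasid_alloc() returns 0 on failure (hence the `r > 0 ? r : 0` above). A minimal usage sketch, with the surrounding body hypothetical:

	unsigned int pasid = kfd_pasid_alloc();

	if (!pasid)			/* 0 signals allocation failure */
		return -ENOMEM;
	/* ... bind the process to the device under this PASID ... */
	kfd_pasid_free(pasid);		/* now routed to amdgpu_pasid_free() */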
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 53ff86d45d91..dec8e64f36bd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h | |||
@@ -507,6 +507,7 @@ struct qcm_process_device { | |||
507 | * All the memory management data should be here too | 507 | * All the memory management data should be here too |
508 | */ | 508 | */ |
509 | uint64_t gds_context_area; | 509 | uint64_t gds_context_area; |
510 | /* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */ | ||
510 | uint64_t page_table_base; | 511 | uint64_t page_table_base; |
511 | uint32_t sh_mem_config; | 512 | uint32_t sh_mem_config; |
512 | uint32_t sh_mem_bases; | 513 | uint32_t sh_mem_bases; |
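To illustrate the new comment: on gfx9 the value stored in page_table_base is the page directory address with PTE flags already ORed in, rather than a bare address. A loose sketch of the idea only; the real driver computes this through an amdgpu GMC helper, and the variable names here are hypothetical.

	/* Sketch only: gfx9 packs PTE flags into the page directory base. */
	uint64_t pd_base = amdgpu_bo_gpu_offset(pd_bo);	/* raw GPU address */

	qpd->page_table_base = pd_base | AMDGPU_PTE_VALID;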
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 0039e451d9af..80b36e860a0a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/compat.h> | 31 | #include <linux/compat.h> |
32 | #include <linux/mman.h> | 32 | #include <linux/mman.h> |
33 | #include <linux/file.h> | 33 | #include <linux/file.h> |
34 | #include "amdgpu_amdkfd.h" | ||
34 | 35 | ||
35 | struct mm_struct; | 36 | struct mm_struct; |
36 | 37 | ||
@@ -100,8 +101,8 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem, | |||
100 | { | 101 | { |
101 | struct kfd_dev *dev = pdd->dev; | 102 | struct kfd_dev *dev = pdd->dev; |
102 | 103 | ||
103 | dev->kfd2kgd->unmap_memory_to_gpu(dev->kgd, mem, pdd->vm); | 104 | amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm); |
104 | dev->kfd2kgd->free_memory_of_gpu(dev->kgd, mem); | 105 | amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem); |
105 | } | 106 | } |
106 | 107 | ||
107 | /* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process | 108 | /* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process |
@@ -119,16 +120,16 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, | |||
119 | int handle; | 120 | int handle; |
120 | int err; | 121 | int err; |
121 | 122 | ||
122 | err = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, gpu_va, size, | 123 | err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size, |
123 | pdd->vm, &mem, NULL, flags); | 124 | pdd->vm, &mem, NULL, flags); |
124 | if (err) | 125 | if (err) |
125 | goto err_alloc_mem; | 126 | goto err_alloc_mem; |
126 | 127 | ||
127 | err = kdev->kfd2kgd->map_memory_to_gpu(kdev->kgd, mem, pdd->vm); | 128 | err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm); |
128 | if (err) | 129 | if (err) |
129 | goto err_map_mem; | 130 | goto err_map_mem; |
130 | 131 | ||
131 | err = kdev->kfd2kgd->sync_memory(kdev->kgd, mem, true); | 132 | err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true); |
132 | if (err) { | 133 | if (err) { |
133 | pr_debug("Sync memory failed, wait interrupted by user signal\n"); | 134 | pr_debug("Sync memory failed, wait interrupted by user signal\n"); |
134 | goto sync_memory_failed; | 135 | goto sync_memory_failed; |
@@ -147,7 +148,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, | |||
147 | } | 148 | } |
148 | 149 | ||
149 | if (kptr) { | 150 | if (kptr) { |
150 | err = kdev->kfd2kgd->map_gtt_bo_to_kernel(kdev->kgd, | 151 | err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd, |
151 | (struct kgd_mem *)mem, kptr, NULL); | 152 | (struct kgd_mem *)mem, kptr, NULL); |
152 | if (err) { | 153 | if (err) { |
153 | pr_debug("Map GTT BO to kernel failed\n"); | 154 | pr_debug("Map GTT BO to kernel failed\n"); |
@@ -165,7 +166,7 @@ sync_memory_failed: | |||
165 | return err; | 166 | return err; |
166 | 167 | ||
167 | err_map_mem: | 168 | err_map_mem: |
168 | kdev->kfd2kgd->free_memory_of_gpu(kdev->kgd, mem); | 169 | amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem); |
169 | err_alloc_mem: | 170 | err_alloc_mem: |
170 | *kptr = NULL; | 171 | *kptr = NULL; |
171 | return err; | 172 | return err; |
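Condensed, kfd_process_alloc_gpuvm() after this change is a classic goto-unwind ladder: each failure label releases exactly what the earlier steps acquired, in reverse order. A sketch with the middle steps abbreviated:

	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
						      pdd->vm, &mem, NULL, flags);
	if (err)
		goto err_alloc_mem;

	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
	if (err)
		goto err_map_mem;

	err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
	if (err)
		goto sync_memory_failed;

	/* ... optional kernel mapping, handle bookkeeping ... */
	return 0;

sync_memory_failed:
	/* unmapping elided here; cleanup runs in reverse order */
err_map_mem:
	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
err_alloc_mem:
	*kptr = NULL;
	return err;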
@@ -296,11 +297,11 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd) | |||
296 | per_device_list) { | 297 | per_device_list) { |
297 | if (!peer_pdd->vm) | 298 | if (!peer_pdd->vm) |
298 | continue; | 299 | continue; |
299 | peer_pdd->dev->kfd2kgd->unmap_memory_to_gpu( | 300 | amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( |
300 | peer_pdd->dev->kgd, mem, peer_pdd->vm); | 301 | peer_pdd->dev->kgd, mem, peer_pdd->vm); |
301 | } | 302 | } |
302 | 303 | ||
303 | pdd->dev->kfd2kgd->free_memory_of_gpu(pdd->dev->kgd, mem); | 304 | amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem); |
304 | kfd_process_device_remove_obj_handle(pdd, id); | 305 | kfd_process_device_remove_obj_handle(pdd, id); |
305 | } | 306 | } |
306 | } | 307 | } |
@@ -323,11 +324,12 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) | |||
323 | pdd->dev->id, p->pasid); | 324 | pdd->dev->id, p->pasid); |
324 | 325 | ||
325 | if (pdd->drm_file) { | 326 | if (pdd->drm_file) { |
326 | pdd->dev->kfd2kgd->release_process_vm(pdd->dev->kgd, pdd->vm); | 327 | amdgpu_amdkfd_gpuvm_release_process_vm( |
328 | pdd->dev->kgd, pdd->vm); | ||
327 | fput(pdd->drm_file); | 329 | fput(pdd->drm_file); |
328 | } | 330 | } |
329 | else if (pdd->vm) | 331 | else if (pdd->vm) |
330 | pdd->dev->kfd2kgd->destroy_process_vm( | 332 | amdgpu_amdkfd_gpuvm_destroy_process_vm( |
331 | pdd->dev->kgd, pdd->vm); | 333 | pdd->dev->kgd, pdd->vm); |
332 | 334 | ||
333 | list_del(&pdd->per_device_list); | 335 | list_del(&pdd->per_device_list); |
@@ -688,12 +690,12 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, | |||
688 | dev = pdd->dev; | 690 | dev = pdd->dev; |
689 | 691 | ||
690 | if (drm_file) | 692 | if (drm_file) |
691 | ret = dev->kfd2kgd->acquire_process_vm( | 693 | ret = amdgpu_amdkfd_gpuvm_acquire_process_vm( |
692 | dev->kgd, drm_file, p->pasid, | 694 | dev->kgd, drm_file, p->pasid, |
693 | &pdd->vm, &p->kgd_process_info, &p->ef); | 695 | &pdd->vm, &p->kgd_process_info, &p->ef); |
694 | else | 696 | else |
695 | ret = dev->kfd2kgd->create_process_vm( | 697 | ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid, |
696 | dev->kgd, p->pasid, &pdd->vm, &p->kgd_process_info, &p->ef); | 698 | &pdd->vm, &p->kgd_process_info, &p->ef); |
697 | if (ret) { | 699 | if (ret) { |
698 | pr_err("Failed to create process VM object\n"); | 700 | pr_err("Failed to create process VM object\n"); |
699 | return ret; | 701 | return ret; |
@@ -714,7 +716,7 @@ err_init_cwsr: | |||
714 | err_reserve_ib_mem: | 716 | err_reserve_ib_mem: |
715 | kfd_process_device_free_bos(pdd); | 717 | kfd_process_device_free_bos(pdd); |
716 | if (!drm_file) | 718 | if (!drm_file) |
717 | dev->kfd2kgd->destroy_process_vm(dev->kgd, pdd->vm); | 719 | amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm); |
718 | pdd->vm = NULL; | 720 | pdd->vm = NULL; |
719 | 721 | ||
720 | return ret; | 722 | return ret; |
@@ -972,7 +974,7 @@ static void restore_process_worker(struct work_struct *work) | |||
972 | */ | 974 | */ |
973 | 975 | ||
974 | p->last_restore_timestamp = get_jiffies_64(); | 976 | p->last_restore_timestamp = get_jiffies_64(); |
975 | ret = pdd->dev->kfd2kgd->restore_process_bos(p->kgd_process_info, | 977 | ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info, |
976 | &p->ef); | 978 | &p->ef); |
977 | if (ret) { | 979 | if (ret) { |
978 | pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n", | 980 | pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n", |
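The retry mentioned in that message is implemented by re-queueing the worker itself; schematically (the backoff constant name is made up for illustration):

	if (ret) {
		pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
			 p->pasid, RESTORE_BACKOFF_MS);
		/* Re-arm the delayed work; this function runs again later. */
		schedule_delayed_work(&p->restore_work,
				      msecs_to_jiffies(RESTORE_BACKOFF_MS));
	}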
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index e3843c5929ed..c73b4ff61f99 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include "kfd_topology.h" | 36 | #include "kfd_topology.h" |
37 | #include "kfd_device_queue_manager.h" | 37 | #include "kfd_device_queue_manager.h" |
38 | #include "kfd_iommu.h" | 38 | #include "kfd_iommu.h" |
39 | #include "amdgpu_amdkfd.h" | ||
39 | 40 | ||
40 | /* topology_device_list - Master list of all topology devices */ | 41 | /* topology_device_list - Master list of all topology devices */ |
41 | static struct list_head topology_device_list; | 42 | static struct list_head topology_device_list; |
@@ -1052,7 +1053,7 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) | |||
1052 | if (!gpu) | 1053 | if (!gpu) |
1053 | return 0; | 1054 | return 0; |
1054 | 1055 | ||
1055 | gpu->kfd2kgd->get_local_mem_info(gpu->kgd, &local_mem_info); | 1056 | amdgpu_amdkfd_get_local_mem_info(gpu->kgd, &local_mem_info); |
1056 | 1057 | ||
1057 | local_mem_size = local_mem_info.local_mem_size_private + | 1058 | local_mem_size = local_mem_info.local_mem_size_private + |
1058 | local_mem_info.local_mem_size_public; | 1059 | local_mem_info.local_mem_size_public; |
@@ -1118,8 +1119,7 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev) | |||
1118 | * for APUs - If CRAT from ACPI reports more than one bank, then | 1119 | * for APUs - If CRAT from ACPI reports more than one bank, then |
1119 | * all the banks will report the same mem_clk_max information | 1120 | * all the banks will report the same mem_clk_max information |
1120 | */ | 1121 | */ |
1121 | dev->gpu->kfd2kgd->get_local_mem_info(dev->gpu->kgd, | 1122 | amdgpu_amdkfd_get_local_mem_info(dev->gpu->kgd, &local_mem_info); |
1122 | &local_mem_info); | ||
1123 | 1123 | ||
1124 | list_for_each_entry(mem, &dev->mem_props, list) | 1124 | list_for_each_entry(mem, &dev->mem_props, list) |
1125 | mem->mem_clk_max = local_mem_info.mem_clk_max; | 1125 | mem->mem_clk_max = local_mem_info.mem_clk_max; |
@@ -1240,7 +1240,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu) | |||
1240 | * needed for the topology | 1240 | * needed for the topology |
1241 | */ | 1241 | */ |
1242 | 1242 | ||
1243 | dev->gpu->kfd2kgd->get_cu_info(dev->gpu->kgd, &cu_info); | 1243 | amdgpu_amdkfd_get_cu_info(dev->gpu->kgd, &cu_info); |
1244 | dev->node_props.simd_arrays_per_engine = | 1244 | dev->node_props.simd_arrays_per_engine = |
1245 | cu_info.num_shader_arrays_per_engine; | 1245 | cu_info.num_shader_arrays_per_engine; |
1246 | 1246 | ||
@@ -1249,7 +1249,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu) | |||
1249 | dev->node_props.location_id = PCI_DEVID(gpu->pdev->bus->number, | 1249 | dev->node_props.location_id = PCI_DEVID(gpu->pdev->bus->number, |
1250 | gpu->pdev->devfn); | 1250 | gpu->pdev->devfn); |
1251 | dev->node_props.max_engine_clk_fcompute = | 1251 | dev->node_props.max_engine_clk_fcompute = |
1252 | dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(dev->gpu->kgd); | 1252 | amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd); |
1253 | dev->node_props.max_engine_clk_ccompute = | 1253 | dev->node_props.max_engine_clk_ccompute = |
1254 | cpufreq_quick_get_max(0) / 1000; | 1254 | cpufreq_quick_get_max(0) / 1000; |
1255 | dev->node_props.drm_render_minor = | 1255 | dev->node_props.drm_render_minor = |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index dd688cfed6aa..aa43bb253ea2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
@@ -76,6 +76,16 @@ | |||
76 | #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" | 76 | #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" |
77 | MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); | 77 | MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); |
78 | 78 | ||
79 | /** | ||
80 | * DOC: overview | ||
81 | * | ||
82 | * The AMDgpu display manager, **amdgpu_dm** (or even simpler, | ||
83 | * **dm**) sits between DRM and DC. It acts as a liason, converting DRM | ||
84 | * requests into DC requests, and DC responses into DRM responses. | ||
85 | * | ||
86 | * The root control structure is &struct amdgpu_display_manager. | ||
87 | */ | ||
88 | |||
79 | /* basic init/fini API */ | 89 | /* basic init/fini API */ |
80 | static int amdgpu_dm_init(struct amdgpu_device *adev); | 90 | static int amdgpu_dm_init(struct amdgpu_device *adev); |
81 | static void amdgpu_dm_fini(struct amdgpu_device *adev); | 91 | static void amdgpu_dm_fini(struct amdgpu_device *adev); |
@@ -95,7 +105,7 @@ static void | |||
95 | amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector); | 105 | amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector); |
96 | 106 | ||
97 | static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, | 107 | static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, |
98 | struct amdgpu_plane *aplane, | 108 | struct drm_plane *plane, |
99 | unsigned long possible_crtcs); | 109 | unsigned long possible_crtcs); |
100 | static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, | 110 | static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, |
101 | struct drm_plane *plane, | 111 | struct drm_plane *plane, |
@@ -379,11 +389,6 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector) | |||
379 | 389 | ||
380 | } | 390 | } |
381 | 391 | ||
382 | /* | ||
383 | * Init display KMS | ||
384 | * | ||
385 | * Returns 0 on success | ||
386 | */ | ||
387 | static int amdgpu_dm_init(struct amdgpu_device *adev) | 392 | static int amdgpu_dm_init(struct amdgpu_device *adev) |
388 | { | 393 | { |
389 | struct dc_init_data init_data; | 394 | struct dc_init_data init_data; |
@@ -429,6 +434,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) | |||
429 | adev->asic_type < CHIP_RAVEN) | 434 | adev->asic_type < CHIP_RAVEN) |
430 | init_data.flags.gpu_vm_support = true; | 435 | init_data.flags.gpu_vm_support = true; |
431 | 436 | ||
437 | if (amdgpu_dc_feature_mask & DC_FBC_MASK) | ||
438 | init_data.flags.fbc_support = true; | ||
439 | |||
432 | /* Display Core create. */ | 440 | /* Display Core create. */ |
433 | adev->dm.dc = dc_create(&init_data); | 441 | adev->dm.dc = dc_create(&init_data); |
434 | 442 | ||
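The new dc_feature_mask plumbing is a plain bit test against the module parameter declared earlier in this series. A sketch of the shape of the mask; the enum location and any bits beyond FBC are assumptions here.

	/* Sketch: opt-in feature bits tested against amdgpu_dc_feature_mask. */
	enum DC_FEATURE_MASK {
		DC_FBC_MASK = 0x1,	/* frame buffer compression */
	};

	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

Loading with something like amdgpu.dcfeaturemask=0x1 would then opt in to FBC, assuming that is the parameter name wired to amdgpu_dc_feature_mask.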
@@ -660,6 +668,26 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) | |||
660 | drm_modeset_unlock(&dev->mode_config.connection_mutex); | 668 | drm_modeset_unlock(&dev->mode_config.connection_mutex); |
661 | } | 669 | } |
662 | 670 | ||
671 | /** | ||
672 | * dm_hw_init() - Initialize DC device | ||
673 | * @handle: The base driver device containing the amdgpu_dm device. | ||
674 | * | ||
675 | * Initialize the &struct amdgpu_display_manager device. This involves calling | ||
676 | * the initializers of each DM component, then populating the struct with them. | ||
677 | * | ||
678 | * Although the function implies hardware initialization, both hardware and | ||
679 | * software are initialized here. Splitting them out to their relevant init | ||
680 | * hooks is a future TODO item. | ||
681 | * | ||
682 | * Some notable things that are initialized here: | ||
683 | * | ||
684 | * - Display Core, both software and hardware | ||
685 | * - DC modules that we need (freesync and color management) | ||
686 | * - DRM software states | ||
687 | * - Interrupt sources and handlers | ||
688 | * - Vblank support | ||
689 | * - Debug FS entries, if enabled | ||
690 | */ | ||
663 | static int dm_hw_init(void *handle) | 691 | static int dm_hw_init(void *handle) |
664 | { | 692 | { |
665 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 693 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
@@ -670,6 +698,14 @@ static int dm_hw_init(void *handle) | |||
670 | return 0; | 698 | return 0; |
671 | } | 699 | } |
672 | 700 | ||
701 | /** | ||
702 | * dm_hw_fini() - Teardown DC device | ||
703 | * @handle: The base driver device containing the amdgpu_dm device. | ||
704 | * | ||
705 | * Tear down components within &struct amdgpu_display_manager that require | ||
706 | * cleanup. This involves cleaning up the DRM device, DC, and any modules that | ||
707 | * were loaded. Also flush IRQ workqueues and disable them. | ||
708 | */ | ||
673 | static int dm_hw_fini(void *handle) | 709 | static int dm_hw_fini(void *handle) |
674 | { | 710 | { |
675 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 711 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
@@ -895,6 +931,16 @@ static int dm_resume(void *handle) | |||
895 | return ret; | 931 | return ret; |
896 | } | 932 | } |
897 | 933 | ||
934 | /** | ||
935 | * DOC: DM Lifecycle | ||
936 | * | ||
937 | * DM (and consequently DC) is registered in the amdgpu base driver as an IP | ||
938 | * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to | ||
939 | * the base driver's device list to be initialized and torn down accordingly. | ||
940 | * | ||
941 | * The functions to do so are provided as hooks in &struct amd_ip_funcs. | ||
942 | */ | ||
943 | |||
898 | static const struct amd_ip_funcs amdgpu_dm_funcs = { | 944 | static const struct amd_ip_funcs amdgpu_dm_funcs = { |
899 | .name = "dm", | 945 | .name = "dm", |
900 | .early_init = dm_early_init, | 946 | .early_init = dm_early_init, |
@@ -962,6 +1008,12 @@ dm_atomic_state_alloc_free(struct drm_atomic_state *state) | |||
962 | kfree(dm_state); | 1008 | kfree(dm_state); |
963 | } | 1009 | } |
964 | 1010 | ||
1011 | /** | ||
1012 | * DOC: atomic | ||
1013 | * | ||
1014 | * *WIP* | ||
1015 | */ | ||
1016 | |||
965 | static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { | 1017 | static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { |
966 | .fb_create = amdgpu_display_user_framebuffer_create, | 1018 | .fb_create = amdgpu_display_user_framebuffer_create, |
967 | .output_poll_changed = drm_fb_helper_output_poll_changed, | 1019 | .output_poll_changed = drm_fb_helper_output_poll_changed, |
@@ -1524,15 +1576,23 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) | |||
1524 | { | 1576 | { |
1525 | struct amdgpu_display_manager *dm = bl_get_data(bd); | 1577 | struct amdgpu_display_manager *dm = bl_get_data(bd); |
1526 | 1578 | ||
1579 | /* The backlight_pwm_u16_16 parameter is an unsigned 32 bit value, with 16 | ||
1580 | * integer bits and 16 fractional bits, where 1.0 is the max backlight value. | ||
1581 | * bd->props.brightness is an 8 bit value and is scaled up by copying its | ||
1582 | * lower byte into the upper byte of the 16 bit value. | ||
1583 | */ | ||
1584 | uint32_t brightness = bd->props.brightness * 0x101; | ||
1585 | |||
1527 | /* | 1586 | /* |
1528 | * PWM interprets 0 as 100% rather than 0% because of HW | 1587 | * PWM interprets 0 as 100% rather than 0% because of HW |
1529 | * limitation for level 0.So limiting minimum brightness level | 1588 | * limitation for level 0. So limiting minimum brightness level |
1530 | * to 1. | 1589 | * to 1. |
1531 | */ | 1590 | */ |
1532 | if (bd->props.brightness < 1) | 1591 | if (bd->props.brightness < 1) |
1533 | return 1; | 1592 | brightness = 0x101; |
1593 | |||
1534 | if (dc_link_set_backlight_level(dm->backlight_link, | 1594 | if (dc_link_set_backlight_level(dm->backlight_link, |
1535 | bd->props.brightness, 0, 0)) | 1595 | brightness, 0, 0)) |
1536 | return 0; | 1596 | return 0; |
1537 | else | 1597 | else |
1538 | return 1; | 1598 | return 1; |
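The conversion comment is easiest to check with numbers: multiplying an 8-bit value by 0x101 copies its byte into the high byte, so 0x00 maps to 0x0000, 0x80 to 0x8080, and 0xFF to 0xFFFF (1.0 in the 16.16 format). A self-contained sketch:

	/* 8-bit brightness -> 16.16 fixed point, where 0xFFFF is ~1.0. */
	static uint32_t brightness_to_u16_16(uint8_t b)
	{
		/* b * 0x101 == (b << 8) | b: low byte copied to high byte. */
		return b * 0x101;
	}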
@@ -1584,18 +1644,18 @@ static int initialize_plane(struct amdgpu_display_manager *dm, | |||
1584 | struct amdgpu_mode_info *mode_info, | 1644 | struct amdgpu_mode_info *mode_info, |
1585 | int plane_id) | 1645 | int plane_id) |
1586 | { | 1646 | { |
1587 | struct amdgpu_plane *plane; | 1647 | struct drm_plane *plane; |
1588 | unsigned long possible_crtcs; | 1648 | unsigned long possible_crtcs; |
1589 | int ret = 0; | 1649 | int ret = 0; |
1590 | 1650 | ||
1591 | plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL); | 1651 | plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL); |
1592 | mode_info->planes[plane_id] = plane; | 1652 | mode_info->planes[plane_id] = plane; |
1593 | 1653 | ||
1594 | if (!plane) { | 1654 | if (!plane) { |
1595 | DRM_ERROR("KMS: Failed to allocate plane\n"); | 1655 | DRM_ERROR("KMS: Failed to allocate plane\n"); |
1596 | return -ENOMEM; | 1656 | return -ENOMEM; |
1597 | } | 1657 | } |
1598 | plane->base.type = mode_info->plane_type[plane_id]; | 1658 | plane->type = mode_info->plane_type[plane_id]; |
1599 | 1659 | ||
1600 | /* | 1660 | /* |
1601 | * HACK: IGT tests expect that each plane can only have | 1661 | * HACK: IGT tests expect that each plane can only have |
@@ -1686,7 +1746,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) | |||
1686 | } | 1746 | } |
1687 | 1747 | ||
1688 | for (i = 0; i < dm->dc->caps.max_streams; i++) | 1748 | for (i = 0; i < dm->dc->caps.max_streams; i++) |
1689 | if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) { | 1749 | if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { |
1690 | DRM_ERROR("KMS: Failed to initialize crtc\n"); | 1750 | DRM_ERROR("KMS: Failed to initialize crtc\n"); |
1691 | goto fail; | 1751 | goto fail; |
1692 | } | 1752 | } |
@@ -2707,18 +2767,11 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, | |||
2707 | drm_connector = &aconnector->base; | 2767 | drm_connector = &aconnector->base; |
2708 | 2768 | ||
2709 | if (!aconnector->dc_sink) { | 2769 | if (!aconnector->dc_sink) { |
2710 | /* | 2770 | if (!aconnector->mst_port) { |
2711 | * Create dc_sink when necessary to MST | 2771 | sink = create_fake_sink(aconnector); |
2712 | * Don't apply fake_sink to MST | 2772 | if (!sink) |
2713 | */ | 2773 | return stream; |
2714 | if (aconnector->mst_port) { | ||
2715 | dm_dp_mst_dc_sink_create(drm_connector); | ||
2716 | return stream; | ||
2717 | } | 2774 | } |
2718 | |||
2719 | sink = create_fake_sink(aconnector); | ||
2720 | if (!sink) | ||
2721 | return stream; | ||
2722 | } else { | 2775 | } else { |
2723 | sink = aconnector->dc_sink; | 2776 | sink = aconnector->dc_sink; |
2724 | } | 2777 | } |
@@ -3307,7 +3360,7 @@ void dm_drm_plane_destroy_state(struct drm_plane *plane, | |||
3307 | static const struct drm_plane_funcs dm_plane_funcs = { | 3360 | static const struct drm_plane_funcs dm_plane_funcs = { |
3308 | .update_plane = drm_atomic_helper_update_plane, | 3361 | .update_plane = drm_atomic_helper_update_plane, |
3309 | .disable_plane = drm_atomic_helper_disable_plane, | 3362 | .disable_plane = drm_atomic_helper_disable_plane, |
3310 | .destroy = drm_plane_cleanup, | 3363 | .destroy = drm_primary_helper_destroy, |
3311 | .reset = dm_drm_plane_reset, | 3364 | .reset = dm_drm_plane_reset, |
3312 | .atomic_duplicate_state = dm_drm_plane_duplicate_state, | 3365 | .atomic_duplicate_state = dm_drm_plane_duplicate_state, |
3313 | .atomic_destroy_state = dm_drm_plane_destroy_state, | 3366 | .atomic_destroy_state = dm_drm_plane_destroy_state, |
@@ -3468,49 +3521,49 @@ static const u32 cursor_formats[] = { | |||
3468 | }; | 3521 | }; |
3469 | 3522 | ||
3470 | static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, | 3523 | static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, |
3471 | struct amdgpu_plane *aplane, | 3524 | struct drm_plane *plane, |
3472 | unsigned long possible_crtcs) | 3525 | unsigned long possible_crtcs) |
3473 | { | 3526 | { |
3474 | int res = -EPERM; | 3527 | int res = -EPERM; |
3475 | 3528 | ||
3476 | switch (aplane->base.type) { | 3529 | switch (plane->type) { |
3477 | case DRM_PLANE_TYPE_PRIMARY: | 3530 | case DRM_PLANE_TYPE_PRIMARY: |
3478 | res = drm_universal_plane_init( | 3531 | res = drm_universal_plane_init( |
3479 | dm->adev->ddev, | 3532 | dm->adev->ddev, |
3480 | &aplane->base, | 3533 | plane, |
3481 | possible_crtcs, | 3534 | possible_crtcs, |
3482 | &dm_plane_funcs, | 3535 | &dm_plane_funcs, |
3483 | rgb_formats, | 3536 | rgb_formats, |
3484 | ARRAY_SIZE(rgb_formats), | 3537 | ARRAY_SIZE(rgb_formats), |
3485 | NULL, aplane->base.type, NULL); | 3538 | NULL, plane->type, NULL); |
3486 | break; | 3539 | break; |
3487 | case DRM_PLANE_TYPE_OVERLAY: | 3540 | case DRM_PLANE_TYPE_OVERLAY: |
3488 | res = drm_universal_plane_init( | 3541 | res = drm_universal_plane_init( |
3489 | dm->adev->ddev, | 3542 | dm->adev->ddev, |
3490 | &aplane->base, | 3543 | plane, |
3491 | possible_crtcs, | 3544 | possible_crtcs, |
3492 | &dm_plane_funcs, | 3545 | &dm_plane_funcs, |
3493 | yuv_formats, | 3546 | yuv_formats, |
3494 | ARRAY_SIZE(yuv_formats), | 3547 | ARRAY_SIZE(yuv_formats), |
3495 | NULL, aplane->base.type, NULL); | 3548 | NULL, plane->type, NULL); |
3496 | break; | 3549 | break; |
3497 | case DRM_PLANE_TYPE_CURSOR: | 3550 | case DRM_PLANE_TYPE_CURSOR: |
3498 | res = drm_universal_plane_init( | 3551 | res = drm_universal_plane_init( |
3499 | dm->adev->ddev, | 3552 | dm->adev->ddev, |
3500 | &aplane->base, | 3553 | plane, |
3501 | possible_crtcs, | 3554 | possible_crtcs, |
3502 | &dm_plane_funcs, | 3555 | &dm_plane_funcs, |
3503 | cursor_formats, | 3556 | cursor_formats, |
3504 | ARRAY_SIZE(cursor_formats), | 3557 | ARRAY_SIZE(cursor_formats), |
3505 | NULL, aplane->base.type, NULL); | 3558 | NULL, plane->type, NULL); |
3506 | break; | 3559 | break; |
3507 | } | 3560 | } |
3508 | 3561 | ||
3509 | drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs); | 3562 | drm_plane_helper_add(plane, &dm_plane_helper_funcs); |
3510 | 3563 | ||
3511 | /* Create (reset) the plane state */ | 3564 | /* Create (reset) the plane state */ |
3512 | if (aplane->base.funcs->reset) | 3565 | if (plane->funcs->reset) |
3513 | aplane->base.funcs->reset(&aplane->base); | 3566 | plane->funcs->reset(plane); |
3514 | 3567 | ||
3515 | 3568 | ||
3516 | return res; | 3569 | return res; |
@@ -3521,7 +3574,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, | |||
3521 | uint32_t crtc_index) | 3574 | uint32_t crtc_index) |
3522 | { | 3575 | { |
3523 | struct amdgpu_crtc *acrtc = NULL; | 3576 | struct amdgpu_crtc *acrtc = NULL; |
3524 | struct amdgpu_plane *cursor_plane; | 3577 | struct drm_plane *cursor_plane; |
3525 | 3578 | ||
3526 | int res = -ENOMEM; | 3579 | int res = -ENOMEM; |
3527 | 3580 | ||
@@ -3529,7 +3582,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, | |||
3529 | if (!cursor_plane) | 3582 | if (!cursor_plane) |
3530 | goto fail; | 3583 | goto fail; |
3531 | 3584 | ||
3532 | cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR; | 3585 | cursor_plane->type = DRM_PLANE_TYPE_CURSOR; |
3533 | res = amdgpu_dm_plane_init(dm, cursor_plane, 0); | 3586 | res = amdgpu_dm_plane_init(dm, cursor_plane, 0); |
3534 | 3587 | ||
3535 | acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); | 3588 | acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); |
@@ -3540,7 +3593,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, | |||
3540 | dm->ddev, | 3593 | dm->ddev, |
3541 | &acrtc->base, | 3594 | &acrtc->base, |
3542 | plane, | 3595 | plane, |
3543 | &cursor_plane->base, | 3596 | cursor_plane, |
3544 | &amdgpu_dm_crtc_funcs, NULL); | 3597 | &amdgpu_dm_crtc_funcs, NULL); |
3545 | 3598 | ||
3546 | if (res) | 3599 | if (res) |
@@ -3779,12 +3832,12 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, | |||
3779 | case DRM_MODE_CONNECTOR_HDMIA: | 3832 | case DRM_MODE_CONNECTOR_HDMIA: |
3780 | aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; | 3833 | aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; |
3781 | aconnector->base.ycbcr_420_allowed = | 3834 | aconnector->base.ycbcr_420_allowed = |
3782 | link->link_enc->features.ycbcr420_supported ? true : false; | 3835 | link->link_enc->features.hdmi_ycbcr420_supported ? true : false; |
3783 | break; | 3836 | break; |
3784 | case DRM_MODE_CONNECTOR_DisplayPort: | 3837 | case DRM_MODE_CONNECTOR_DisplayPort: |
3785 | aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; | 3838 | aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; |
3786 | aconnector->base.ycbcr_420_allowed = | 3839 | aconnector->base.ycbcr_420_allowed = |
3787 | link->link_enc->features.ycbcr420_supported ? true : false; | 3840 | link->link_enc->features.dp_ycbcr420_supported ? true : false; |
3788 | break; | 3841 | break; |
3789 | case DRM_MODE_CONNECTOR_DVID: | 3842 | case DRM_MODE_CONNECTOR_DVID: |
3790 | aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; | 3843 | aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; |
@@ -4542,6 +4595,14 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev, | |||
4542 | /*TODO Handle EINTR, reenable IRQ*/ | 4595 | /*TODO Handle EINTR, reenable IRQ*/ |
4543 | } | 4596 | } |
4544 | 4597 | ||
4598 | /** | ||
4599 | * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation. | ||
4600 | * @state: The atomic state to commit | ||
4601 | * | ||
4602 | * This will tell DC to commit the constructed DC state from atomic_check, | ||
4603 | * programming the hardware. Any failure here implies a hardware failure, since | ||
4604 | * atomic check should have filtered anything non-kosher. | ||
4605 | */ | ||
4545 | static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) | 4606 | static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) |
4546 | { | 4607 | { |
4547 | struct drm_device *dev = state->dev; | 4608 | struct drm_device *dev = state->dev; |
@@ -5313,6 +5374,12 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru | |||
5313 | struct dc_stream_update stream_update; | 5374 | struct dc_stream_update stream_update; |
5314 | enum surface_update_type update_type = UPDATE_TYPE_FAST; | 5375 | enum surface_update_type update_type = UPDATE_TYPE_FAST; |
5315 | 5376 | ||
5377 | if (!updates || !surface) { | ||
5378 | DRM_ERROR("Plane or surface update failed to allocate\n"); | ||
5379 | /* Set type to FULL to avoid crashing in DC */ | ||
5380 | update_type = UPDATE_TYPE_FULL; | ||
5381 | goto ret; | ||
5382 | } | ||
5316 | 5383 | ||
5317 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { | 5384 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
5318 | new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); | 5385 | new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); |
@@ -5388,6 +5455,31 @@ ret: | |||
5388 | return update_type; | 5455 | return update_type; |
5389 | } | 5456 | } |
5390 | 5457 | ||
5458 | /** | ||
5459 | * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. | ||
5460 | * @dev: The DRM device | ||
5461 | * @state: The atomic state to commit | ||
5462 | * | ||
5463 | * Validate that the given atomic state is programmable by DC into hardware. | ||
5464 | * This involves constructing a &struct dc_state reflecting the new hardware | ||
5465 | * state we wish to commit, then querying DC to see if it is programmable. It's | ||
5466 | * important not to modify the existing DC state. Otherwise, atomic_check | ||
5467 | * may unexpectedly commit hardware changes. | ||
5468 | * | ||
5469 | * When validating the DC state, it's important that the right locks are | ||
5470 | * acquired. For the full update case, which removes/adds/updates streams on | ||
5471 | * one CRTC while flipping on another, acquiring the global lock guarantees | ||
5472 | * that any such full update commit will wait for completion of any | ||
5473 | * outstanding flip, using DRM's synchronization events. See | ||
5474 | * dm_determine_update_type_for_commit(). | ||
5475 | * | ||
5476 | * Note that DM adds the affected connectors for all CRTCs in state, when that | ||
5477 | * might not seem necessary. This is because DC stream creation requires the | ||
5478 | * DC sink, which is tied to the DRM connector state. Cleaning this up should | ||
5479 | * be possible but non-trivial - a possible TODO item. | ||
5480 | * | ||
5481 | * Return: 0 on success, or a negative error code if validation failed. | ||
5482 | */ | ||
5391 | static int amdgpu_dm_atomic_check(struct drm_device *dev, | 5483 | static int amdgpu_dm_atomic_check(struct drm_device *dev, |
5392 | struct drm_atomic_state *state) | 5484 | struct drm_atomic_state *state) |
5393 | { | 5485 | { |
@@ -5490,15 +5582,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, | |||
5490 | lock_and_validation_needed = true; | 5582 | lock_and_validation_needed = true; |
5491 | } | 5583 | } |
5492 | 5584 | ||
5493 | /* | ||
5494 | * For full updates case when | ||
5495 | * removing/adding/updating streams on one CRTC while flipping | ||
5496 | * on another CRTC, | ||
5497 | * acquiring global lock will guarantee that any such full | ||
5498 | * update commit | ||
5499 | * will wait for completion of any outstanding flip using DRMs | ||
5500 | * synchronization events. | ||
5501 | */ | ||
5502 | update_type = dm_determine_update_type_for_commit(dc, state); | 5585 | update_type = dm_determine_update_type_for_commit(dc, state); |
5503 | 5586 | ||
5504 | if (overall_update_type < update_type) | 5587 | if (overall_update_type < update_type) |
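The "global lock" the new kernel-doc refers to is not a literal lock: a full update serializes itself by pulling every CRTC into the atomic state, so the commit machinery waits on all outstanding flips. A minimal sketch of that idea using only core DRM helpers; the function name is illustrative.

	/* Sketch: make a full-update commit depend on every CRTC. */
	static int acquire_global_lock(struct drm_device *dev,
				       struct drm_atomic_state *state)
	{
		struct drm_crtc *crtc;
		struct drm_crtc_state *crtc_state;

		drm_for_each_crtc(crtc, dev) {
			/* Adding a CRTC state ties this commit to that CRTC. */
			crtc_state = drm_atomic_get_crtc_state(state, crtc);
			if (IS_ERR(crtc_state))
				return PTR_ERR(crtc_state);
		}
		return 0;
	}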
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 978b34a5011c..d6960644d714 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | |||
@@ -59,49 +59,100 @@ struct common_irq_params { | |||
59 | enum dc_irq_source irq_src; | 59 | enum dc_irq_source irq_src; |
60 | }; | 60 | }; |
61 | 61 | ||
62 | /** | ||
63 | * struct irq_list_head - Linked-list for low context IRQ handlers. | ||
64 | * | ||
65 | * @head: The list_head within &struct handler_data | ||
66 | * @work: A work_struct containing the deferred handler work | ||
67 | */ | ||
62 | struct irq_list_head { | 68 | struct irq_list_head { |
63 | struct list_head head; | 69 | struct list_head head; |
64 | /* In case this interrupt needs post-processing, 'work' will be queued*/ | 70 | /* In case this interrupt needs post-processing, 'work' will be queued*/ |
65 | struct work_struct work; | 71 | struct work_struct work; |
66 | }; | 72 | }; |
67 | 73 | ||
74 | /** | ||
75 | * struct dm_comressor_info - Buffer info used by frame buffer compression | ||
76 | * @cpu_addr: MMIO cpu addr | ||
77 | * @bo_ptr: Pointer to the buffer object | ||
78 | * @gpu_addr: MMIO gpu addr | ||
79 | */ | ||
68 | struct dm_comressor_info { | 80 | struct dm_comressor_info { |
69 | void *cpu_addr; | 81 | void *cpu_addr; |
70 | struct amdgpu_bo *bo_ptr; | 82 | struct amdgpu_bo *bo_ptr; |
71 | uint64_t gpu_addr; | 83 | uint64_t gpu_addr; |
72 | }; | 84 | }; |
73 | 85 | ||
86 | /** | ||
87 | * struct amdgpu_display_manager - Central amdgpu display manager device | ||
88 | * | ||
89 | * @dc: Display Core control structure | ||
90 | * @adev: AMDGPU base driver structure | ||
91 | * @ddev: DRM base driver structure | ||
92 | * @display_indexes_num: Max number of display streams supported | ||
93 | * @irq_handler_list_table_lock: Synchronizes access to IRQ tables | ||
94 | * @backlight_dev: Backlight control device | ||
95 | * @cached_state: Caches device atomic state for suspend/resume | ||
96 | * @compressor: Frame buffer compression buffer. See &struct dm_comressor_info | ||
97 | */ | ||
74 | struct amdgpu_display_manager { | 98 | struct amdgpu_display_manager { |
99 | |||
75 | struct dc *dc; | 100 | struct dc *dc; |
101 | |||
102 | /** | ||
103 | * @cgs_device: | ||
104 | * | ||
105 | * The Common Graphics Services device. It provides an interface for | ||
106 | * accessing registers. | ||
107 | */ | ||
76 | struct cgs_device *cgs_device; | 108 | struct cgs_device *cgs_device; |
77 | 109 | ||
78 | struct amdgpu_device *adev; /*AMD base driver*/ | 110 | struct amdgpu_device *adev; |
79 | struct drm_device *ddev; /*DRM base driver*/ | 111 | struct drm_device *ddev; |
80 | u16 display_indexes_num; | 112 | u16 display_indexes_num; |
81 | 113 | ||
82 | /* | 114 | /** |
83 | * 'irq_source_handler_table' holds a list of handlers | 115 | * @irq_handler_list_low_tab: |
84 | * per (DAL) IRQ source. | 116 | * |
117 | * Low priority IRQ handler table. | ||
85 | * | 118 | * |
86 | * Each IRQ source may need to be handled at different contexts. | 119 | * It is an n*m table consisting of n IRQ sources, and m handlers per IRQ |
87 | * By 'context' we mean, for example: | 120 | * source. Low priority IRQ handlers are deferred to a workqueue to be |
88 | * - The ISR context, which is the direct interrupt handler. | 121 | * processed. Hence, they can sleep. |
89 | * - The 'deferred' context - this is the post-processing of the | ||
90 | * interrupt, but at a lower priority. | ||
91 | * | 122 | * |
92 | * Note that handlers are called in the same order as they were | 123 | * Note that handlers are called in the same order as they were |
93 | * registered (FIFO). | 124 | * registered (FIFO). |
94 | */ | 125 | */ |
95 | struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER]; | 126 | struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER]; |
127 | |||
128 | /** | ||
129 | * @irq_handler_list_high_tab: | ||
130 | * | ||
131 | * High priority IRQ handler table. | ||
132 | * | ||
133 | * It is an n*m table, same as &irq_handler_list_low_tab. However, | ||
134 | * handlers in this table are not deferred and are called immediately. | ||
135 | */ | ||
96 | struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER]; | 136 | struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER]; |
97 | 137 | ||
138 | /** | ||
139 | * @pflip_params: | ||
140 | * | ||
141 | * Page flip IRQ parameters, passed to registered handlers when | ||
142 | * triggered. | ||
143 | */ | ||
98 | struct common_irq_params | 144 | struct common_irq_params |
99 | pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1]; | 145 | pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1]; |
100 | 146 | ||
147 | /** | ||
148 | * @vblank_params: | ||
149 | * | ||
150 | * Vertical blanking IRQ parameters, passed to registered handlers when | ||
151 | * triggered. | ||
152 | */ | ||
101 | struct common_irq_params | 153 | struct common_irq_params |
102 | vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1]; | 154 | vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1]; |
103 | 155 | ||
104 | /* this spin lock synchronizes access to 'irq_handler_list_table' */ | ||
105 | spinlock_t irq_handler_list_table_lock; | 156 | spinlock_t irq_handler_list_table_lock; |
106 | 157 | ||
107 | struct backlight_device *backlight_dev; | 158 | struct backlight_device *backlight_dev; |
@@ -110,9 +161,6 @@ struct amdgpu_display_manager { | |||
110 | 161 | ||
111 | struct mod_freesync *freesync_module; | 162 | struct mod_freesync *freesync_module; |
112 | 163 | ||
113 | /** | ||
114 | * Caches device atomic state for suspend/resume | ||
115 | */ | ||
116 | struct drm_atomic_state *cached_state; | 164 | struct drm_atomic_state *cached_state; |
117 | 165 | ||
118 | struct dm_comressor_info compressor; | 166 | struct dm_comressor_info compressor; |
@@ -160,8 +208,6 @@ struct amdgpu_dm_connector { | |||
160 | struct mutex hpd_lock; | 208 | struct mutex hpd_lock; |
161 | 209 | ||
162 | bool fake_enable; | 210 | bool fake_enable; |
163 | |||
164 | bool mst_connected; | ||
165 | }; | 211 | }; |
166 | 212 | ||
167 | #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) | 213 | #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index be19e6861189..216e48cec716 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | |||
@@ -164,7 +164,7 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc) | |||
164 | */ | 164 | */ |
165 | stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; | 165 | stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; |
166 | ret = mod_color_calculate_regamma_params(stream->out_transfer_func, | 166 | ret = mod_color_calculate_regamma_params(stream->out_transfer_func, |
167 | gamma, true, adev->asic_type <= CHIP_RAVEN); | 167 | gamma, true, adev->asic_type <= CHIP_RAVEN, NULL); |
168 | dc_gamma_release(&gamma); | 168 | dc_gamma_release(&gamma); |
169 | if (!ret) { | 169 | if (!ret) { |
170 | stream->out_transfer_func->type = old_type; | 170 | stream->out_transfer_func->type = old_type; |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 01fc5717b657..f088ac585978 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | |||
@@ -75,6 +75,11 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) | |||
75 | return -EINVAL; | 75 | return -EINVAL; |
76 | } | 76 | } |
77 | 77 | ||
78 | if (!stream_state) { | ||
79 | DRM_ERROR("No stream state for CRTC%d\n", crtc->index); | ||
80 | return -EINVAL; | ||
81 | } | ||
82 | |||
78 | /* When enabling CRC, we should also disable dithering. */ | 83 | /* When enabling CRC, we should also disable dithering. */ |
79 | if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) { | 84 | if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) { |
80 | if (dc_stream_configure_crc(stream_state->ctx->dc, | 85 | if (dc_stream_configure_crc(stream_state->ctx->dc, |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index a212178f2edc..cd10f77cdeb0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | |||
@@ -32,16 +32,55 @@ | |||
32 | #include "amdgpu_dm.h" | 32 | #include "amdgpu_dm.h" |
33 | #include "amdgpu_dm_irq.h" | 33 | #include "amdgpu_dm_irq.h" |
34 | 34 | ||
35 | /** | ||
36 | * DOC: overview | ||
37 | * | ||
38 | * DM provides another layer of IRQ management on top of what the base driver | ||
39 | * already provides. This is something that could be cleaned up, and is a | ||
40 | * future TODO item. | ||
41 | * | ||
42 | * The base driver provides IRQ source registration with DRM, handler | ||
43 | * registration into the base driver's IRQ table, and a handler callback | ||
44 | * amdgpu_irq_handler(), which DRM calls when an interrupt occurs. This generic | ||
45 | * handler looks up the IRQ table, and calls the respective | ||
46 | * &amdgpu_irq_src_funcs.process hookups. | ||
47 | * | ||
48 | * What DM provides on top are two IRQ tables specifically for top-half and | ||
49 | * bottom-half IRQ handling, with the bottom-half implementing workqueues: | ||
50 | * | ||
51 | * - &amdgpu_display_manager.irq_handler_list_high_tab | ||
52 | * - &amdgpu_display_manager.irq_handler_list_low_tab | ||
53 | * | ||
54 | * They override the base driver's IRQ table, and the effect can be seen | ||
55 | * in the hooks that DM provides for &amdgpu_irq_src_funcs.process. They | ||
56 | * are all set to the DM generic handler amdgpu_dm_irq_handler(), which looks up | ||
57 | * DM's IRQ tables. However, in order for the base driver to recognize this hook, DM | ||
58 | * still needs to register the IRQ with the base driver. See | ||
59 | * dce110_register_irq_handlers() and dcn10_register_irq_handlers(). | ||
60 | * | ||
61 | * To expose DC's hardware interrupt toggle to the base driver, DM implements | ||
62 | * &amdgpu_irq_src_funcs.set hooks. Base driver calls it through | ||
63 | * amdgpu_irq_update() to enable or disable the interrupt. | ||
64 | */ | ||
65 | |||
35 | /****************************************************************************** | 66 | /****************************************************************************** |
36 | * Private declarations. | 67 | * Private declarations. |
37 | *****************************************************************************/ | 68 | *****************************************************************************/ |
38 | 69 | ||
70 | /** | ||
71 | * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers. | ||
72 | * | ||
73 | * @list: Linked list entry referencing the next/previous handler | ||
74 | * @handler: Handler function | ||
75 | * @handler_arg: Argument passed to the handler when triggered | ||
76 | * @dm: DM which this handler belongs to | ||
77 | * @irq_source: DC interrupt source that this handler is registered for | ||
78 | */ | ||
39 | struct amdgpu_dm_irq_handler_data { | 79 | struct amdgpu_dm_irq_handler_data { |
40 | struct list_head list; | 80 | struct list_head list; |
41 | interrupt_handler handler; | 81 | interrupt_handler handler; |
42 | void *handler_arg; | 82 | void *handler_arg; |
43 | 83 | ||
44 | /* DM which this handler belongs to */ | ||
45 | struct amdgpu_display_manager *dm; | 84 | struct amdgpu_display_manager *dm; |
46 | /* DAL irq source which registered for this interrupt. */ | 85 | /* DAL irq source which registered for this interrupt. */ |
47 | enum dc_irq_source irq_source; | 86 | enum dc_irq_source irq_source; |
@@ -68,7 +107,7 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd, | |||
68 | } | 107 | } |
69 | 108 | ||
70 | /** | 109 | /** |
71 | * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper. | 110 | * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper. |
72 | * | 111 | * |
73 | * @work: work struct | 112 | * @work: work struct |
74 | */ | 113 | */ |
@@ -99,8 +138,8 @@ static void dm_irq_work_func(struct work_struct *work) | |||
99 | * (The most common use is HPD interrupt) */ | 138 | * (The most common use is HPD interrupt) */ |
100 | } | 139 | } |
101 | 140 | ||
102 | /** | 141 | /* |
103 | * Remove a handler and return a pointer to hander list from which the | 142 | * Remove a handler and return a pointer to handler list from which the |
104 | * handler was removed. | 143 | * handler was removed. |
105 | */ | 144 | */ |
106 | static struct list_head *remove_irq_handler(struct amdgpu_device *adev, | 145 | static struct list_head *remove_irq_handler(struct amdgpu_device *adev, |
@@ -203,6 +242,24 @@ static bool validate_irq_unregistration_params(enum dc_irq_source irq_source, | |||
203 | * Note: caller is responsible for input validation. | 242 | * Note: caller is responsible for input validation. |
204 | *****************************************************************************/ | 243 | *****************************************************************************/ |
205 | 244 | ||
245 | /** | ||
246 | * amdgpu_dm_irq_register_interrupt() - Register a handler within DM. | ||
247 | * @adev: The base driver device containing the DM device. | ||
248 | * @int_params: Interrupt parameters containing the source and handler context | ||
249 | * @ih: Function pointer to the interrupt handler to register | ||
250 | * @handler_args: Arguments passed to the handler when the interrupt occurs | ||
251 | * | ||
252 | * Register an interrupt handler for the given IRQ source, under the given | ||
253 | * context. The context can either be high or low. High context handlers are | ||
254 | * executed directly within ISR context, while low context is executed within a | ||
255 | * workqueue, thereby allowing operations that sleep. | ||
256 | * | ||
257 | * Registered handlers are called in a FIFO manner, i.e. the earliest | ||
258 | * registered handler will be called first. | ||
259 | * | ||
260 | * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ | ||
261 | * source, handler function, and args | ||
262 | */ | ||
206 | void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, | 263 | void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, |
207 | struct dc_interrupt_params *int_params, | 264 | struct dc_interrupt_params *int_params, |
208 | void (*ih)(void *), | 265 | void (*ih)(void *), |
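A usage sketch for the API documented above. INTERRUPT_LOW_IRQ_CONTEXT selects the deferred workqueue path; the handler and its argument are stand-ins for whatever the caller registers (HPD handling is the common case mentioned elsewhere in this file).

	struct dc_interrupt_params int_params;
	void *rc;

	int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;	/* may sleep */
	int_params.irq_source = DC_IRQ_SOURCE_HPD1;

	rc = amdgpu_dm_irq_register_interrupt(adev, &int_params,
					      handle_hpd_irq,	/* hypothetical */
					      (void *)aconnector);
	if (!rc)
		DRM_ERROR("DM: failed to register HPD handler\n");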
@@ -261,6 +318,15 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, | |||
261 | return handler_data; | 318 | return handler_data; |
262 | } | 319 | } |
263 | 320 | ||
321 | /** | ||
322 | * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table | ||
323 | * @adev: The base driver device containing the DM device | ||
324 | * @irq_source: IRQ source to remove the given handler from | ||
325 | * @ih: Function pointer to the interrupt handler to unregister | ||
326 | * | ||
327 | * Go through both low and high context IRQ tables, and find the given handler | ||
328 | * for the given irq source. If found, remove it. Otherwise, do nothing. | ||
329 | */ | ||
264 | void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev, | 330 | void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev, |
265 | enum dc_irq_source irq_source, | 331 | enum dc_irq_source irq_source, |
266 | void *ih) | 332 | void *ih) |
@@ -295,6 +361,20 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev, | |||
295 | } | 361 | } |
296 | } | 362 | } |
297 | 363 | ||
364 | /** | ||
365 | * amdgpu_dm_irq_init() - Initialize DM IRQ management | ||
366 | * @adev: The base driver device containing the DM device | ||
367 | * | ||
368 | * Initialize DM's high and low context IRQ tables. | ||
369 | * | ||
370 | * The N by M table contains N IRQ sources, with M | ||
371 | * &struct amdgpu_dm_irq_handler_data hooked together in a linked list. The | ||
372 | * list_heads are initialized here. When an interrupt n is triggered, all m | ||
373 | * handlers are called in sequence, FIFO according to registration order. | ||
374 | * | ||
375 | * The low context table requires special steps to initialize, since handlers | ||
376 | * will be deferred to a workqueue. See &struct irq_list_head. | ||
377 | */ | ||
298 | int amdgpu_dm_irq_init(struct amdgpu_device *adev) | 378 | int amdgpu_dm_irq_init(struct amdgpu_device *adev) |
299 | { | 379 | { |
300 | int src; | 380 | int src; |
@@ -317,7 +397,12 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev) | |||
317 | return 0; | 397 | return 0; |
318 | } | 398 | } |
319 | 399 | ||
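In practice, the initialization described above amounts to one list head per IRQ source in each table, plus the work item that drains the low-context list. A sketch under those assumptions, reusing dm_irq_work_func() from earlier in this file:

	int src;

	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
		/* Low context: handlers run from this deferred work item. */
		INIT_LIST_HEAD(&adev->dm.irq_handler_list_low_tab[src].head);
		INIT_WORK(&adev->dm.irq_handler_list_low_tab[src].work,
			  dm_irq_work_func);

		/* High context: plain list; handlers run in ISR context. */
		INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
	}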
320 | /* DM IRQ and timer resource release */ | 400 | /** |
401 | * amdgpu_dm_irq_fini() - Tear down DM IRQ management | ||
402 | * @adev: The base driver device containing the DM device | ||
403 | * | ||
404 | * Flush all work within the low context IRQ table. | ||
405 | */ | ||
321 | void amdgpu_dm_irq_fini(struct amdgpu_device *adev) | 406 | void amdgpu_dm_irq_fini(struct amdgpu_device *adev) |
322 | { | 407 | { |
323 | int src; | 408 | int src; |
@@ -414,7 +499,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev) | |||
414 | return 0; | 499 | return 0; |
415 | } | 500 | } |
416 | 501 | ||
417 | /** | 502 | /* |
418 | * amdgpu_dm_irq_schedule_work - schedule all work items registered for the | 503 | * amdgpu_dm_irq_schedule_work - schedule all work items registered for the |
419 | * "irq_source". | 504 | * "irq_source". |
420 | */ | 505 | */ |
@@ -439,8 +524,9 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev, | |||
439 | 524 | ||
440 | } | 525 | } |
441 | 526 | ||
442 | /** amdgpu_dm_irq_immediate_work | 527 | /* |
443 | * Callback high irq work immediately, don't send to work queue | 528 | * amdgpu_dm_irq_immediate_work |
529 | * Call high irq work immediately; do not defer it to the work queue | ||
444 | */ | 530 | */ |
445 | static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev, | 531 | static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev, |
446 | enum dc_irq_source irq_source) | 532 | enum dc_irq_source irq_source) |
@@ -467,11 +553,14 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev, | |||
467 | DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); | 553 | DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); |
468 | } | 554 | } |
469 | 555 | ||
470 | /* | 556 | /** |
471 | * amdgpu_dm_irq_handler | 557 | * amdgpu_dm_irq_handler - Generic DM IRQ handler |
558 | * @adev: amdgpu base driver device containing the DM device | ||
559 | * @source: Unused | ||
560 | * @entry: Data about the triggered interrupt | ||
472 | * | 561 | * |
473 | * Generic IRQ handler, calls all registered high irq work immediately, and | 562 | * Calls all registered high irq work immediately, and schedules work for low |
474 | * schedules work for low irq | 563 | * irq. The DM IRQ table is used to find the corresponding handlers. |
475 | */ | 564 | */ |
476 | static int amdgpu_dm_irq_handler(struct amdgpu_device *adev, | 565 | static int amdgpu_dm_irq_handler(struct amdgpu_device *adev, |
477 | struct amdgpu_irq_src *source, | 566 | struct amdgpu_irq_src *source, |
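Putting the two dispatch paths together, the generic handler is roughly the following; translating the IV entry to a DC source via dc_interrupt_to_irq_source() is an assumption about the DC helper in use.

	static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
					 struct amdgpu_irq_src *source,
					 struct amdgpu_iv_entry *entry)
	{
		enum dc_irq_source src =
			dc_interrupt_to_irq_source(adev->dm.dc, entry->src_id,
						   entry->src_data[0]);

		dc_interrupt_ack(adev->dm.dc, src);	 /* ack in hardware */
		amdgpu_dm_irq_immediate_work(adev, src); /* high: call now */
		amdgpu_dm_irq_schedule_work(adev, src);	 /* low: defer */

		return 0;
	}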
@@ -613,7 +702,7 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev) | |||
613 | adev->hpd_irq.funcs = &dm_hpd_irq_funcs; | 702 | adev->hpd_irq.funcs = &dm_hpd_irq_funcs; |
614 | } | 703 | } |
615 | 704 | ||
616 | /* | 705 | /** |
617 | * amdgpu_dm_hpd_init - hpd setup callback. | 706 | * amdgpu_dm_hpd_init - hpd setup callback. |
618 | * | 707 | * |
619 | * @adev: amdgpu_device pointer | 708 | * @adev: amdgpu_device pointer |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 03601d717fed..d02c32a1039c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | |||
@@ -205,40 +205,6 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { | |||
205 | .atomic_get_property = amdgpu_dm_connector_atomic_get_property | 205 | .atomic_get_property = amdgpu_dm_connector_atomic_get_property |
206 | }; | 206 | }; |
207 | 207 | ||
208 | void dm_dp_mst_dc_sink_create(struct drm_connector *connector) | ||
209 | { | ||
210 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); | ||
211 | struct dc_sink *dc_sink; | ||
212 | struct dc_sink_init_data init_params = { | ||
213 | .link = aconnector->dc_link, | ||
214 | .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; | ||
215 | |||
216 | /* FIXME none of this is safe. we shouldn't touch aconnector here in | ||
217 | * atomic_check | ||
218 | */ | ||
219 | |||
220 | /* | ||
221 | * TODO: Need to further figure out why ddc.algo is NULL while MST port exists | ||
222 | */ | ||
223 | if (!aconnector->port || !aconnector->port->aux.ddc.algo) | ||
224 | return; | ||
225 | |||
226 | ASSERT(aconnector->edid); | ||
227 | |||
228 | dc_sink = dc_link_add_remote_sink( | ||
229 | aconnector->dc_link, | ||
230 | (uint8_t *)aconnector->edid, | ||
231 | (aconnector->edid->extensions + 1) * EDID_LENGTH, | ||
232 | &init_params); | ||
233 | |||
234 | dc_sink->priv = aconnector; | ||
235 | aconnector->dc_sink = dc_sink; | ||
236 | |||
237 | if (aconnector->dc_sink) | ||
238 | amdgpu_dm_update_freesync_caps( | ||
239 | connector, aconnector->edid); | ||
240 | } | ||
241 | |||
242 | static int dm_dp_mst_get_modes(struct drm_connector *connector) | 208 | static int dm_dp_mst_get_modes(struct drm_connector *connector) |
243 | { | 209 | { |
244 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); | 210 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); |
@@ -319,12 +285,7 @@ dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector) | |||
319 | struct amdgpu_device *adev = dev->dev_private; | 285 | struct amdgpu_device *adev = dev->dev_private; |
320 | struct amdgpu_encoder *amdgpu_encoder; | 286 | struct amdgpu_encoder *amdgpu_encoder; |
321 | struct drm_encoder *encoder; | 287 | struct drm_encoder *encoder; |
322 | const struct drm_connector_helper_funcs *connector_funcs = | ||
323 | connector->base.helper_private; | ||
324 | struct drm_encoder *enc_master = | ||
325 | connector_funcs->best_encoder(&connector->base); | ||
326 | 288 | ||
327 | DRM_DEBUG_KMS("enc master is %p\n", enc_master); | ||
328 | amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL); | 289 | amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL); |
329 | if (!amdgpu_encoder) | 290 | if (!amdgpu_encoder) |
330 | return NULL; | 291 | return NULL; |
@@ -354,25 +315,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, | |||
354 | struct amdgpu_device *adev = dev->dev_private; | 315 | struct amdgpu_device *adev = dev->dev_private; |
355 | struct amdgpu_dm_connector *aconnector; | 316 | struct amdgpu_dm_connector *aconnector; |
356 | struct drm_connector *connector; | 317 | struct drm_connector *connector; |
357 | struct drm_connector_list_iter conn_iter; | ||
358 | |||
359 | drm_connector_list_iter_begin(dev, &conn_iter); | ||
360 | drm_for_each_connector_iter(connector, &conn_iter) { | ||
361 | aconnector = to_amdgpu_dm_connector(connector); | ||
362 | if (aconnector->mst_port == master | ||
363 | && !aconnector->port) { | ||
364 | DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n", | ||
365 | aconnector, connector->base.id, aconnector->mst_port); | ||
366 | |||
367 | aconnector->port = port; | ||
368 | drm_connector_set_path_property(connector, pathprop); | ||
369 | |||
370 | drm_connector_list_iter_end(&conn_iter); | ||
371 | aconnector->mst_connected = true; | ||
372 | return &aconnector->base; | ||
373 | } | ||
374 | } | ||
375 | drm_connector_list_iter_end(&conn_iter); | ||
376 | 318 | ||
377 | aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); | 319 | aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); |
378 | if (!aconnector) | 320 | if (!aconnector) |
@@ -421,8 +363,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, | |||
421 | */ | 363 | */ |
422 | amdgpu_dm_connector_funcs_reset(connector); | 364 | amdgpu_dm_connector_funcs_reset(connector); |
423 | 365 | ||
424 | aconnector->mst_connected = true; | ||
425 | |||
426 | DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n", | 366 | DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n", |
427 | aconnector, connector->base.id, aconnector->mst_port); | 367 | aconnector, connector->base.id, aconnector->mst_port); |
428 | 368 | ||
@@ -434,6 +374,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, | |||
434 | static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, | 374 | static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, |
435 | struct drm_connector *connector) | 375 | struct drm_connector *connector) |
436 | { | 376 | { |
377 | struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr); | ||
378 | struct drm_device *dev = master->base.dev; | ||
379 | struct amdgpu_device *adev = dev->dev_private; | ||
437 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); | 380 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); |
438 | 381 | ||
439 | DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n", | 382 | DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n", |
@@ -447,7 +390,10 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, | |||
447 | aconnector->dc_sink = NULL; | 390 | aconnector->dc_sink = NULL; |
448 | } | 391 | } |
449 | 392 | ||
450 | aconnector->mst_connected = false; | 393 | drm_connector_unregister(connector); |
394 | if (adev->mode_info.rfbdev) | ||
395 | drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector); | ||
396 | drm_connector_put(connector); | ||
451 | } | 397 | } |
452 | 398 | ||
453 | static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) | 399 | static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) |
@@ -458,18 +404,10 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) | |||
458 | drm_kms_helper_hotplug_event(dev); | 404 | drm_kms_helper_hotplug_event(dev); |
459 | } | 405 | } |
460 | 406 | ||
461 | static void dm_dp_mst_link_status_reset(struct drm_connector *connector) | ||
462 | { | ||
463 | mutex_lock(&connector->dev->mode_config.mutex); | ||
464 | drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD); | ||
465 | mutex_unlock(&connector->dev->mode_config.mutex); | ||
466 | } | ||
467 | |||
468 | static void dm_dp_mst_register_connector(struct drm_connector *connector) | 407 | static void dm_dp_mst_register_connector(struct drm_connector *connector) |
469 | { | 408 | { |
470 | struct drm_device *dev = connector->dev; | 409 | struct drm_device *dev = connector->dev; |
471 | struct amdgpu_device *adev = dev->dev_private; | 410 | struct amdgpu_device *adev = dev->dev_private; |
472 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); | ||
473 | 411 | ||
474 | if (adev->mode_info.rfbdev) | 412 | if (adev->mode_info.rfbdev) |
475 | drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector); | 413 | drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector); |
@@ -477,9 +415,6 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector) | |||
477 | DRM_ERROR("adev->mode_info.rfbdev is NULL\n"); | 415 | DRM_ERROR("adev->mode_info.rfbdev is NULL\n"); |
478 | 416 | ||
479 | drm_connector_register(connector); | 417 | drm_connector_register(connector); |
480 | |||
481 | if (aconnector->mst_connected) | ||
482 | dm_dp_mst_link_status_reset(connector); | ||
483 | } | 418 | } |
484 | 419 | ||
485 | static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { | 420 | static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { |
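The destroy callback above now tears the MST connector down for good (drm_connector_unregister(), removal from the fbdev helper, then the final drm_connector_put()) instead of parking it for reuse via the removed mst_connected flag. A toy refcount model of why the unregister must precede the final put; this is plain C, not the real DRM API, and every name is invented:

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refs;
	int registered;
};

static void obj_get(struct obj *o)
{
	o->refs++;
}

static void obj_put(struct obj *o)
{
	if (--o->refs)
		return;
	/* dropping the last reference while still registered would
	 * leave a dangling entry behind, hence unregister-then-put */
	if (o->registered)
		fprintf(stderr, "BUG: freeing a registered object\n");
	free(o);
	puts("freed");
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->refs = 1;       /* creator's reference */
	o->registered = 1;
	obj_get(o);        /* extra reference held by the topology layer */

	o->registered = 0; /* unregister first... */
	obj_put(o);        /* ...drop the topology reference... */
	obj_put(o);        /* ...and the final put frees the object */
	return 0;
}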
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index 8cf51da26657..2da851b40042 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | |||
@@ -31,6 +31,5 @@ struct amdgpu_dm_connector; | |||
31 | 31 | ||
32 | void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, | 32 | void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, |
33 | struct amdgpu_dm_connector *aconnector); | 33 | struct amdgpu_dm_connector *aconnector); |
34 | void dm_dp_mst_dc_sink_create(struct drm_connector *connector); | ||
35 | 34 | ||
36 | #endif | 35 | #endif |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 12001a006b2d..9d2d6986b983 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | |||
@@ -485,11 +485,11 @@ void pp_rv_set_display_requirement(struct pp_smu *pp, | |||
485 | return; | 485 | return; |
486 | 486 | ||
487 | clock.clock_type = amd_pp_dcf_clock; | 487 | clock.clock_type = amd_pp_dcf_clock; |
488 | clock.clock_freq_in_khz = req->hard_min_dcefclk_khz; | 488 | clock.clock_freq_in_khz = req->hard_min_dcefclk_mhz * 1000; |
489 | pp_funcs->display_clock_voltage_request(pp_handle, &clock); | 489 | pp_funcs->display_clock_voltage_request(pp_handle, &clock); |
490 | 490 | ||
491 | clock.clock_type = amd_pp_f_clock; | 491 | clock.clock_type = amd_pp_f_clock; |
492 | clock.clock_freq_in_khz = req->hard_min_fclk_khz; | 492 | clock.clock_freq_in_khz = req->hard_min_fclk_mhz * 1000; |
493 | pp_funcs->display_clock_voltage_request(pp_handle, &clock); | 493 | pp_funcs->display_clock_voltage_request(pp_handle, &clock); |
494 | } | 494 | } |
495 | 495 | ||
@@ -518,13 +518,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp, | |||
518 | wm_dce_clocks[i].wm_set_id = | 518 | wm_dce_clocks[i].wm_set_id = |
519 | ranges->reader_wm_sets[i].wm_inst; | 519 | ranges->reader_wm_sets[i].wm_inst; |
520 | wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz = | 520 | wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz = |
521 | ranges->reader_wm_sets[i].max_drain_clk_khz; | 521 | ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000; |
522 | wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz = | 522 | wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz = |
523 | ranges->reader_wm_sets[i].min_drain_clk_khz; | 523 | ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000; |
524 | wm_dce_clocks[i].wm_max_mem_clk_in_khz = | 524 | wm_dce_clocks[i].wm_max_mem_clk_in_khz = |
525 | ranges->reader_wm_sets[i].max_fill_clk_khz; | 525 | ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000; |
526 | wm_dce_clocks[i].wm_min_mem_clk_in_khz = | 526 | wm_dce_clocks[i].wm_min_mem_clk_in_khz = |
527 | ranges->reader_wm_sets[i].min_fill_clk_khz; | 527 | ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000; |
528 | } | 528 | } |
529 | 529 | ||
530 | for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) { | 530 | for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) { |
@@ -534,13 +534,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp, | |||
534 | wm_soc_clocks[i].wm_set_id = | 534 | wm_soc_clocks[i].wm_set_id = |
535 | ranges->writer_wm_sets[i].wm_inst; | 535 | ranges->writer_wm_sets[i].wm_inst; |
536 | wm_soc_clocks[i].wm_max_socclk_clk_in_khz = | 536 | wm_soc_clocks[i].wm_max_socclk_clk_in_khz = |
537 | ranges->writer_wm_sets[i].max_fill_clk_khz; | 537 | ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000; |
538 | wm_soc_clocks[i].wm_min_socclk_clk_in_khz = | 538 | wm_soc_clocks[i].wm_min_socclk_clk_in_khz = |
539 | ranges->writer_wm_sets[i].min_fill_clk_khz; | 539 | ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000; |
540 | wm_soc_clocks[i].wm_max_mem_clk_in_khz = | 540 | wm_soc_clocks[i].wm_max_mem_clk_in_khz = |
541 | ranges->writer_wm_sets[i].max_drain_clk_khz; | 541 | ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000; |
542 | wm_soc_clocks[i].wm_min_mem_clk_in_khz = | 542 | wm_soc_clocks[i].wm_min_mem_clk_in_khz = |
543 | ranges->writer_wm_sets[i].min_drain_clk_khz; | 543 | ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000; |
544 | } | 544 | } |
545 | 545 | ||
546 | pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges); | 546 | pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges); |
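The two hunks above follow an interface change in pp_smu: the request and watermark-range fields now carry MHz instead of kHz, so each consumer multiplies by 1000 at the boundary because powerplay still expects kHz. A minimal standalone sketch of that conversion, assuming illustrative struct and field names rather than the driver's real types:

#include <stdint.h>
#include <stdio.h>

/* illustrative mirrors of the interface: requests now carry MHz,
 * powerplay structures still carry kHz */
struct fake_wm_set {
	uint32_t min_drain_clk_mhz;
	uint32_t max_drain_clk_mhz;
};

struct fake_dce_wm {
	uint32_t wm_min_dcfclk_clk_in_khz;
	uint32_t wm_max_dcfclk_clk_in_khz;
};

static void fill_khz_from_mhz(struct fake_dce_wm *out,
			      const struct fake_wm_set *in)
{
	/* scale up by 1000 at the boundary, as in the hunks above */
	out->wm_min_dcfclk_clk_in_khz = in->min_drain_clk_mhz * 1000;
	out->wm_max_dcfclk_clk_in_khz = in->max_drain_clk_mhz * 1000;
}

int main(void)
{
	struct fake_wm_set set = { .min_drain_clk_mhz = 300,
				   .max_drain_clk_mhz = 1200 };
	struct fake_dce_wm wm;

	fill_khz_from_mhz(&wm, &set);
	printf("%u..%u kHz\n", wm.wm_min_dcfclk_clk_in_khz,
	       wm.wm_max_dcfclk_clk_in_khz); /* 300000..1200000 kHz */
	return 0;
}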
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 0e1dc1b1a48d..c2ab026aee91 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | |||
@@ -2030,7 +2030,7 @@ static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object, | |||
2030 | static struct device_id device_type_from_device_id(uint16_t device_id) | 2030 | static struct device_id device_type_from_device_id(uint16_t device_id) |
2031 | { | 2031 | { |
2032 | 2032 | ||
2033 | struct device_id result_device_id; | 2033 | struct device_id result_device_id = {0}; |
2034 | 2034 | ||
2035 | switch (device_id) { | 2035 | switch (device_id) { |
2036 | case ATOM_DEVICE_LCD1_SUPPORT: | 2036 | case ATOM_DEVICE_LCD1_SUPPORT: |
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index ff764da21b6f..751bb614fc0e 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | |||
@@ -1884,6 +1884,8 @@ static const struct dc_vbios_funcs vbios_funcs = { | |||
1884 | 1884 | ||
1885 | .is_accelerated_mode = bios_parser_is_accelerated_mode, | 1885 | .is_accelerated_mode = bios_parser_is_accelerated_mode, |
1886 | 1886 | ||
1887 | .is_active_display = bios_is_active_display, | ||
1888 | |||
1887 | .set_scratch_critical_state = bios_parser_set_scratch_critical_state, | 1889 | .set_scratch_critical_state = bios_parser_set_scratch_critical_state, |
1888 | 1890 | ||
1889 | 1891 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c index d4589470985c..fdda8aa8e303 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c | |||
@@ -88,3 +88,96 @@ uint32_t bios_get_vga_enabled_displays( | |||
88 | return active_disp; | 88 | return active_disp; |
89 | } | 89 | } |
90 | 90 | ||
91 | bool bios_is_active_display( | ||
92 | struct dc_bios *bios, | ||
93 | enum signal_type signal, | ||
94 | const struct connector_device_tag_info *device_tag) | ||
95 | { | ||
96 | uint32_t active = 0; | ||
97 | uint32_t connected = 0; | ||
98 | uint32_t bios_scratch_0 = 0; | ||
99 | uint32_t bios_scratch_3 = 0; | ||
100 | |||
101 | switch (signal) { | ||
102 | case SIGNAL_TYPE_DVI_SINGLE_LINK: | ||
103 | case SIGNAL_TYPE_DVI_DUAL_LINK: | ||
104 | case SIGNAL_TYPE_HDMI_TYPE_A: | ||
105 | case SIGNAL_TYPE_DISPLAY_PORT: | ||
106 | case SIGNAL_TYPE_DISPLAY_PORT_MST: | ||
107 | { | ||
108 | if (device_tag->dev_id.device_type == DEVICE_TYPE_DFP) { | ||
109 | switch (device_tag->dev_id.enum_id) { | ||
110 | case 1: | ||
111 | { | ||
112 | active = ATOM_S3_DFP1_ACTIVE; | ||
113 | connected = 0x0008; //ATOM_DISPLAY_DFP1_CONNECT | ||
114 | } | ||
115 | break; | ||
116 | |||
117 | case 2: | ||
118 | { | ||
119 | active = ATOM_S3_DFP2_ACTIVE; | ||
120 | connected = 0x0080; //ATOM_DISPLAY_DFP2_CONNECT | ||
121 | } | ||
122 | break; | ||
123 | |||
124 | case 3: | ||
125 | { | ||
126 | active = ATOM_S3_DFP3_ACTIVE; | ||
127 | connected = 0x0200; //ATOM_DISPLAY_DFP3_CONNECT | ||
128 | } | ||
129 | break; | ||
130 | |||
131 | case 4: | ||
132 | { | ||
133 | active = ATOM_S3_DFP4_ACTIVE; | ||
134 | connected = 0x0400; //ATOM_DISPLAY_DFP4_CONNECT | ||
135 | } | ||
136 | break; | ||
137 | |||
138 | case 5: | ||
139 | { | ||
140 | active = ATOM_S3_DFP5_ACTIVE; | ||
141 | connected = 0x0800; //ATOM_DISPLAY_DFP5_CONNECT | ||
142 | } | ||
143 | break; | ||
144 | |||
145 | case 6: | ||
146 | { | ||
147 | active = ATOM_S3_DFP6_ACTIVE; | ||
148 | connected = 0x0040; //ATOM_DISPLAY_DFP6_CONNECT | ||
149 | } | ||
150 | break; | ||
151 | |||
152 | default: | ||
153 | break; | ||
154 | } | ||
155 | } | ||
156 | } | ||
157 | break; | ||
158 | |||
159 | case SIGNAL_TYPE_LVDS: | ||
160 | case SIGNAL_TYPE_EDP: | ||
161 | { | ||
162 | active = ATOM_S3_LCD1_ACTIVE; | ||
163 | connected = 0x0002; //ATOM_DISPLAY_LCD1_CONNECT | ||
164 | } | ||
165 | break; | ||
166 | |||
167 | default: | ||
168 | break; | ||
169 | } | ||
170 | |||
171 | |||
172 | if (bios->regs->BIOS_SCRATCH_0) /* TODO: follow up with other ASICs */ | ||
173 | bios_scratch_0 = REG_READ(BIOS_SCRATCH_0); | ||
174 | if (bios->regs->BIOS_SCRATCH_3) /* TODO: follow up with other ASICs */ | ||
175 | bios_scratch_3 = REG_READ(BIOS_SCRATCH_3); | ||
176 | |||
177 | bios_scratch_3 &= ATOM_S3_DEVICE_ACTIVE_MASK; | ||
178 | if ((active & bios_scratch_3) && (connected & bios_scratch_0)) | ||
179 | return true; | ||
180 | |||
181 | return false; | ||
182 | } | ||
183 | |||
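bios_is_active_display() boils down to two bitmask tests: the per-device active bit in BIOS_SCRATCH_3 (masked by ATOM_S3_DEVICE_ACTIVE_MASK) and the matching connected bit in BIOS_SCRATCH_0. A self-contained model of that predicate; the constants below are stand-ins for illustration, not the real ATOM definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_S3_DFP1_ACTIVE       0x0008u /* stand-in for ATOM_S3_DFP1_ACTIVE */
#define FAKE_DISPLAY_DFP1_CONNECT 0x0008u /* stand-in for the connect bit */
#define FAKE_S3_ACTIVE_MASK       0xFFFFu /* stand-in for ATOM_S3_DEVICE_ACTIVE_MASK */

static bool is_active_display(uint32_t scratch0, uint32_t scratch3,
			      uint32_t active_bit, uint32_t connected_bit)
{
	/* a display counts as active only if both the VBIOS "active"
	 * and "connected" bits for that device are set */
	scratch3 &= FAKE_S3_ACTIVE_MASK;
	return (scratch3 & active_bit) && (scratch0 & connected_bit);
}

int main(void)
{
	printf("%d\n", is_active_display(0x0008, 0x0008,
					 FAKE_S3_DFP1_ACTIVE,
					 FAKE_DISPLAY_DFP1_CONNECT)); /* 1 */
	printf("%d\n", is_active_display(0x0000, 0x0008,
					 FAKE_S3_DFP1_ACTIVE,
					 FAKE_DISPLAY_DFP1_CONNECT)); /* 0: not connected */
	return 0;
}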
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h index 75a29e68fb27..f33cac2147e3 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h | |||
@@ -35,6 +35,10 @@ bool bios_is_accelerated_mode(struct dc_bios *bios); | |||
35 | void bios_set_scratch_acc_mode_change(struct dc_bios *bios); | 35 | void bios_set_scratch_acc_mode_change(struct dc_bios *bios); |
36 | void bios_set_scratch_critical_state(struct dc_bios *bios, bool state); | 36 | void bios_set_scratch_critical_state(struct dc_bios *bios, bool state); |
37 | uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios); | 37 | uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios); |
38 | bool bios_is_active_display( | ||
39 | struct dc_bios *bios, | ||
40 | enum signal_type signal, | ||
41 | const struct connector_device_tag_info *device_tag); | ||
38 | 42 | ||
39 | #define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type))) | 43 | #define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type))) |
40 | 44 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 3208188b7ed4..43e4a2be0fa6 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | |||
@@ -1423,27 +1423,27 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc) | |||
1423 | ranges.num_reader_wm_sets = WM_SET_COUNT; | 1423 | ranges.num_reader_wm_sets = WM_SET_COUNT; |
1424 | ranges.num_writer_wm_sets = WM_SET_COUNT; | 1424 | ranges.num_writer_wm_sets = WM_SET_COUNT; |
1425 | ranges.reader_wm_sets[0].wm_inst = WM_A; | 1425 | ranges.reader_wm_sets[0].wm_inst = WM_A; |
1426 | ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz; | 1426 | ranges.reader_wm_sets[0].min_drain_clk_mhz = min_dcfclk_khz / 1000; |
1427 | ranges.reader_wm_sets[0].max_drain_clk_khz = overdrive; | 1427 | ranges.reader_wm_sets[0].max_drain_clk_mhz = overdrive / 1000; |
1428 | ranges.reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz; | 1428 | ranges.reader_wm_sets[0].min_fill_clk_mhz = min_fclk_khz / 1000; |
1429 | ranges.reader_wm_sets[0].max_fill_clk_khz = overdrive; | 1429 | ranges.reader_wm_sets[0].max_fill_clk_mhz = overdrive / 1000; |
1430 | ranges.writer_wm_sets[0].wm_inst = WM_A; | 1430 | ranges.writer_wm_sets[0].wm_inst = WM_A; |
1431 | ranges.writer_wm_sets[0].min_fill_clk_khz = socclk_khz; | 1431 | ranges.writer_wm_sets[0].min_fill_clk_mhz = socclk_khz / 1000; |
1432 | ranges.writer_wm_sets[0].max_fill_clk_khz = overdrive; | 1432 | ranges.writer_wm_sets[0].max_fill_clk_mhz = overdrive / 1000; |
1433 | ranges.writer_wm_sets[0].min_drain_clk_khz = min_fclk_khz; | 1433 | ranges.writer_wm_sets[0].min_drain_clk_mhz = min_fclk_khz / 1000; |
1434 | ranges.writer_wm_sets[0].max_drain_clk_khz = overdrive; | 1434 | ranges.writer_wm_sets[0].max_drain_clk_mhz = overdrive / 1000; |
1435 | 1435 | ||
1436 | if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) { | 1436 | if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) { |
1437 | ranges.reader_wm_sets[0].wm_inst = WM_A; | 1437 | ranges.reader_wm_sets[0].wm_inst = WM_A; |
1438 | ranges.reader_wm_sets[0].min_drain_clk_khz = 300000; | 1438 | ranges.reader_wm_sets[0].min_drain_clk_mhz = 300; |
1439 | ranges.reader_wm_sets[0].max_drain_clk_khz = 5000000; | 1439 | ranges.reader_wm_sets[0].max_drain_clk_mhz = 5000; |
1440 | ranges.reader_wm_sets[0].min_fill_clk_khz = 800000; | 1440 | ranges.reader_wm_sets[0].min_fill_clk_mhz = 800; |
1441 | ranges.reader_wm_sets[0].max_fill_clk_khz = 5000000; | 1441 | ranges.reader_wm_sets[0].max_fill_clk_mhz = 5000; |
1442 | ranges.writer_wm_sets[0].wm_inst = WM_A; | 1442 | ranges.writer_wm_sets[0].wm_inst = WM_A; |
1443 | ranges.writer_wm_sets[0].min_fill_clk_khz = 200000; | 1443 | ranges.writer_wm_sets[0].min_fill_clk_mhz = 200; |
1444 | ranges.writer_wm_sets[0].max_fill_clk_khz = 5000000; | 1444 | ranges.writer_wm_sets[0].max_fill_clk_mhz = 5000; |
1445 | ranges.writer_wm_sets[0].min_drain_clk_khz = 800000; | 1445 | ranges.writer_wm_sets[0].min_drain_clk_mhz = 800; |
1446 | ranges.writer_wm_sets[0].max_drain_clk_khz = 5000000; | 1446 | ranges.writer_wm_sets[0].max_drain_clk_mhz = 5000; |
1447 | } | 1447 | } |
1448 | 1448 | ||
1449 | ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0]; | 1449 | ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0]; |
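On the producer side, dcn_calcs now fills the renamed *_mhz fields by integer-dividing its internal kHz figures by 1000; the truncation is presumably acceptable at MHz granularity. A tiny sketch of that direction of the conversion:

#include <stdint.h>
#include <stdio.h>

/* illustrative only: kHz -> MHz by integer division, as in the hunk above */
static uint32_t khz_to_mhz(uint32_t khz)
{
	return khz / 1000; /* truncates: 812345 kHz -> 812 MHz */
}

int main(void)
{
	printf("%u\n", khz_to_mhz(300000)); /* 300 */
	printf("%u\n", khz_to_mhz(812345)); /* 812 */
	return 0;
}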
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 7c491c91465f..3279e26c3440 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c | |||
@@ -391,9 +391,11 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream) | |||
391 | == stream) { | 391 | == stream) { |
392 | 392 | ||
393 | pipes = &dc->current_state->res_ctx.pipe_ctx[i]; | 393 | pipes = &dc->current_state->res_ctx.pipe_ctx[i]; |
394 | dc->hwss.program_csc_matrix(pipes, | 394 | dc->hwss.program_output_csc(dc, |
395 | stream->output_color_space, | 395 | pipes, |
396 | stream->csc_color_matrix.matrix); | 396 | stream->output_color_space, |
397 | stream->csc_color_matrix.matrix, | ||
398 | pipes->plane_res.hubp->opp_id); | ||
397 | ret = true; | 399 | ret = true; |
398 | } | 400 | } |
399 | } | 401 | } |
@@ -941,7 +943,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c | |||
941 | if (!dcb->funcs->is_accelerated_mode(dcb)) | 943 | if (!dcb->funcs->is_accelerated_mode(dcb)) |
942 | dc->hwss.enable_accelerated_mode(dc, context); | 944 | dc->hwss.enable_accelerated_mode(dc, context); |
943 | 945 | ||
944 | dc->hwss.set_bandwidth(dc, context, false); | 946 | dc->hwss.prepare_bandwidth(dc, context); |
945 | 947 | ||
946 | /* re-program planes for existing stream, in case we need to | 948 | /* re-program planes for existing stream, in case we need to |
947 | * free up plane resource for later use | 949 | * free up plane resource for later use |
@@ -957,8 +959,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c | |||
957 | } | 959 | } |
958 | 960 | ||
959 | /* Program hardware */ | 961 | /* Program hardware */ |
960 | dc->hwss.ready_shared_resources(dc, context); | ||
961 | |||
962 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | 962 | for (i = 0; i < dc->res_pool->pipe_count; i++) { |
963 | pipe = &context->res_ctx.pipe_ctx[i]; | 963 | pipe = &context->res_ctx.pipe_ctx[i]; |
964 | dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); | 964 | dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe); |
@@ -1012,7 +1012,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c | |||
1012 | dc_enable_stereo(dc, context, dc_streams, context->stream_count); | 1012 | dc_enable_stereo(dc, context, dc_streams, context->stream_count); |
1013 | 1013 | ||
1014 | /* pplib is notified if disp_num changed */ | 1014 | /* pplib is notified if disp_num changed */ |
1015 | dc->hwss.set_bandwidth(dc, context, true); | 1015 | dc->hwss.optimize_bandwidth(dc, context); |
1016 | 1016 | ||
1017 | dc_release_state(dc->current_state); | 1017 | dc_release_state(dc->current_state); |
1018 | 1018 | ||
@@ -1020,8 +1020,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c | |||
1020 | 1020 | ||
1021 | dc_retain_state(dc->current_state); | 1021 | dc_retain_state(dc->current_state); |
1022 | 1022 | ||
1023 | dc->hwss.optimize_shared_resources(dc); | ||
1024 | |||
1025 | return result; | 1023 | return result; |
1026 | } | 1024 | } |
1027 | 1025 | ||
@@ -1063,7 +1061,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc) | |||
1063 | 1061 | ||
1064 | dc->optimized_required = false; | 1062 | dc->optimized_required = false; |
1065 | 1063 | ||
1066 | dc->hwss.set_bandwidth(dc, context, true); | 1064 | dc->hwss.optimize_bandwidth(dc, context); |
1067 | return true; | 1065 | return true; |
1068 | } | 1066 | } |
1069 | 1067 | ||
@@ -1369,35 +1367,6 @@ static struct dc_stream_status *stream_get_status( | |||
1369 | 1367 | ||
1370 | static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; | 1368 | static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL; |
1371 | 1369 | ||
1372 | static void notify_display_count_to_smu( | ||
1373 | struct dc *dc, | ||
1374 | struct dc_state *context) | ||
1375 | { | ||
1376 | int i, display_count; | ||
1377 | struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; | ||
1378 | |||
1379 | /* | ||
1380 | * if function pointer not set up, this message is | ||
1381 | * sent as part of pplib_apply_display_requirements. | ||
1382 | * So just return. | ||
1383 | */ | ||
1384 | if (!pp_smu || !pp_smu->set_display_count) | ||
1385 | return; | ||
1386 | |||
1387 | display_count = 0; | ||
1388 | for (i = 0; i < context->stream_count; i++) { | ||
1389 | const struct dc_stream_state *stream = context->streams[i]; | ||
1390 | |||
1391 | /* only notify active stream */ | ||
1392 | if (stream->dpms_off) | ||
1393 | continue; | ||
1394 | |||
1395 | display_count++; | ||
1396 | } | ||
1397 | |||
1398 | pp_smu->set_display_count(&pp_smu->pp_smu, display_count); | ||
1399 | } | ||
1400 | |||
1401 | static void commit_planes_do_stream_update(struct dc *dc, | 1370 | static void commit_planes_do_stream_update(struct dc *dc, |
1402 | struct dc_stream_state *stream, | 1371 | struct dc_stream_state *stream, |
1403 | struct dc_stream_update *stream_update, | 1372 | struct dc_stream_update *stream_update, |
@@ -1422,7 +1391,6 @@ static void commit_planes_do_stream_update(struct dc *dc, | |||
1422 | stream_update->adjust->v_total_max); | 1391 | stream_update->adjust->v_total_max); |
1423 | 1392 | ||
1424 | if (stream_update->periodic_fn_vsync_delta && | 1393 | if (stream_update->periodic_fn_vsync_delta && |
1425 | pipe_ctx->stream_res.tg && | ||
1426 | pipe_ctx->stream_res.tg->funcs->program_vline_interrupt) | 1394 | pipe_ctx->stream_res.tg->funcs->program_vline_interrupt) |
1427 | pipe_ctx->stream_res.tg->funcs->program_vline_interrupt( | 1395 | pipe_ctx->stream_res.tg->funcs->program_vline_interrupt( |
1428 | pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, | 1396 | pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, |
@@ -1448,19 +1416,13 @@ static void commit_planes_do_stream_update(struct dc *dc, | |||
1448 | if (stream_update->dpms_off) { | 1416 | if (stream_update->dpms_off) { |
1449 | if (*stream_update->dpms_off) { | 1417 | if (*stream_update->dpms_off) { |
1450 | core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE); | 1418 | core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE); |
1451 | dc->hwss.pplib_apply_display_requirements( | 1419 | dc->hwss.optimize_bandwidth(dc, dc->current_state); |
1452 | dc, dc->current_state); | ||
1453 | notify_display_count_to_smu(dc, dc->current_state); | ||
1454 | } else { | 1420 | } else { |
1455 | dc->hwss.pplib_apply_display_requirements( | 1421 | dc->hwss.prepare_bandwidth(dc, dc->current_state); |
1456 | dc, dc->current_state); | ||
1457 | notify_display_count_to_smu(dc, dc->current_state); | ||
1458 | core_link_enable_stream(dc->current_state, pipe_ctx); | 1422 | core_link_enable_stream(dc->current_state, pipe_ctx); |
1459 | } | 1423 | } |
1460 | } | 1424 | } |
1461 | 1425 | ||
1462 | |||
1463 | |||
1464 | if (stream_update->abm_level && pipe_ctx->stream_res.abm) { | 1426 | if (stream_update->abm_level && pipe_ctx->stream_res.abm) { |
1465 | if (pipe_ctx->stream_res.tg->funcs->is_blanked) { | 1427 | if (pipe_ctx->stream_res.tg->funcs->is_blanked) { |
1466 | // if otg funcs defined check if blanked before programming | 1428 | // if otg funcs defined check if blanked before programming |
@@ -1487,7 +1449,7 @@ static void commit_planes_for_stream(struct dc *dc, | |||
1487 | struct pipe_ctx *top_pipe_to_program = NULL; | 1449 | struct pipe_ctx *top_pipe_to_program = NULL; |
1488 | 1450 | ||
1489 | if (update_type == UPDATE_TYPE_FULL) { | 1451 | if (update_type == UPDATE_TYPE_FULL) { |
1490 | dc->hwss.set_bandwidth(dc, context, false); | 1452 | dc->hwss.prepare_bandwidth(dc, context); |
1491 | context_clock_trace(dc, context); | 1453 | context_clock_trace(dc, context); |
1492 | } | 1454 | } |
1493 | 1455 | ||
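Throughout dc.c the single set_bandwidth(dc, context, decrease_allowed) hook is split into a prepare_bandwidth/optimize_bandwidth pair: clocks are raised before the new state is programmed and trimmed back once it has been committed. A schematic of that two-phase pattern, assuming placeholder types and bodies (only the hook names mirror the hunks above):

#include <stdio.h>

struct fake_dc;
struct fake_state { int dummy; };

struct fake_hwss {
	/* raise clocks so the new state can be programmed safely */
	void (*prepare_bandwidth)(struct fake_dc *dc, struct fake_state *ctx);
	/* trim clocks to what the committed state actually needs */
	void (*optimize_bandwidth)(struct fake_dc *dc, struct fake_state *ctx);
};

struct fake_dc { struct fake_hwss hwss; };

static void prepare(struct fake_dc *dc, struct fake_state *ctx)
{
	(void)dc; (void)ctx;
	puts("raise clocks for the worst case");
}

static void optimize(struct fake_dc *dc, struct fake_state *ctx)
{
	(void)dc; (void)ctx;
	puts("trim clocks");
}

int main(void)
{
	struct fake_dc dc = { .hwss = { prepare, optimize } };
	struct fake_state ctx = { 0 };

	dc.hwss.prepare_bandwidth(&dc, &ctx); /* before touching pipes */
	puts("program pipes");
	dc.hwss.optimize_bandwidth(&dc, &ctx); /* after the state is committed */
	return 0;
}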
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index e1ebdf7b5eaf..73d049506618 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c | |||
@@ -311,7 +311,7 @@ void context_timing_trace( | |||
311 | { | 311 | { |
312 | int i; | 312 | int i; |
313 | struct dc *core_dc = dc; | 313 | struct dc *core_dc = dc; |
314 | int h_pos[MAX_PIPES], v_pos[MAX_PIPES]; | 314 | int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0}; |
315 | struct crtc_position position; | 315 | struct crtc_position position; |
316 | unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index; | 316 | unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index; |
317 | DC_LOGGER_INIT(dc->ctx->logger); | 317 | DC_LOGGER_INIT(dc->ctx->logger); |
@@ -322,8 +322,7 @@ void context_timing_trace( | |||
322 | /* get_position() returns CRTC vertical/horizontal counter | 322 | /* get_position() returns CRTC vertical/horizontal counter |
323 | * hence not applicable for underlay pipe | 323 | * hence not applicable for underlay pipe |
324 | */ | 324 | */ |
325 | if (pipe_ctx->stream == NULL | 325 | if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx) |
326 | || pipe_ctx->pipe_idx == underlay_idx) | ||
327 | continue; | 326 | continue; |
328 | 327 | ||
329 | pipe_ctx->stream_res.tg->funcs->get_position(pipe_ctx->stream_res.tg, &position); | 328 | pipe_ctx->stream_res.tg->funcs->get_position(pipe_ctx->stream_res.tg, &position); |
@@ -333,7 +332,7 @@ void context_timing_trace( | |||
333 | for (i = 0; i < core_dc->res_pool->pipe_count; i++) { | 332 | for (i = 0; i < core_dc->res_pool->pipe_count; i++) { |
334 | struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; | 333 | struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; |
335 | 334 | ||
336 | if (pipe_ctx->stream == NULL) | 335 | if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx) |
337 | continue; | 336 | continue; |
338 | 337 | ||
339 | TIMING_TRACE("OTG_%d H_tot:%d V_tot:%d H_pos:%d V_pos:%d\n", | 338 | TIMING_TRACE("OTG_%d H_tot:%d V_tot:%d H_pos:%d V_pos:%d\n", |
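The {0} initializers added above matter because h_pos/v_pos are only written for pipes that survive the stream/underlay filter; zero-filling keeps the later trace from reading indeterminate stack values. A minimal illustration of the C aggregate-initializer behavior being relied on:

#include <stdio.h>

#define MAX_PIPES 6

int main(void)
{
	/* = {0} zero-fills the whole array, so entries for skipped
	 * pipes read as 0 instead of stack garbage */
	int h_pos[MAX_PIPES] = {0};
	int i;

	for (i = 0; i < MAX_PIPES; i++)
		printf("%d ", h_pos[i]); /* prints all zeros */
	printf("\n");
	return 0;
}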
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index fb04a4ad141f..7ee9c033acbd 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c | |||
@@ -1357,28 +1357,13 @@ static enum dc_status enable_link_dp( | |||
1357 | struct dc_link *link = stream->sink->link; | 1357 | struct dc_link *link = stream->sink->link; |
1358 | struct dc_link_settings link_settings = {0}; | 1358 | struct dc_link_settings link_settings = {0}; |
1359 | enum dp_panel_mode panel_mode; | 1359 | enum dp_panel_mode panel_mode; |
1360 | enum dc_link_rate max_link_rate = LINK_RATE_HIGH2; | ||
1361 | 1360 | ||
1362 | /* get link settings for video mode timing */ | 1361 | /* get link settings for video mode timing */ |
1363 | decide_link_settings(stream, &link_settings); | 1362 | decide_link_settings(stream, &link_settings); |
1364 | 1363 | ||
1365 | /* raise clock state for HBR3 if required. Confirmed with HW DCE/DPCS | 1364 | pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = |
1366 | * logic for HBR3 still needs Nominal (0.8V) on VDDC rail | 1365 | link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; |
1367 | */ | 1366 | state->dccg->funcs->update_clocks(state->dccg, state, false); |
1368 | if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE) | ||
1369 | max_link_rate = LINK_RATE_HIGH3; | ||
1370 | |||
1371 | if (link_settings.link_rate == max_link_rate) { | ||
1372 | struct dc_clocks clocks = state->bw.dcn.clk; | ||
1373 | |||
1374 | /* dce/dcn compat, do not update dispclk */ | ||
1375 | clocks.dispclk_khz = 0; | ||
1376 | /* 27mhz = 27000000hz= 27000khz */ | ||
1377 | clocks.phyclk_khz = link_settings.link_rate * 27000; | ||
1378 | |||
1379 | state->dis_clk->funcs->update_clocks( | ||
1380 | state->dis_clk, &clocks, false); | ||
1381 | } | ||
1382 | 1367 | ||
1383 | dp_enable_link_phy( | 1368 | dp_enable_link_phy( |
1384 | link, | 1369 | link, |
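enable_link_dp() now derives the requested symbol clock straight from the link-rate code: per the removed comment, LINK_RATE_REF_FREQ_IN_KHZ is the 27 MHz reference, and the rate code counts 0.27 Gbps units of link bandwidth, so code * 27000 yields the symbol clock in kHz. A worked sketch under that assumption; the enum values below are the standard DPCD codes, not taken from this diff:

#include <stdint.h>
#include <stdio.h>

#define LINK_RATE_REF_FREQ_IN_KHZ 27000u /* 27 MHz, per the removed comment */

/* DPCD-style link-rate codes (bandwidth in 0.27 Gbps units) */
enum {
	LINK_RATE_LOW   = 0x06, /* RBR,  1.62 Gbps */
	LINK_RATE_HIGH  = 0x0A, /* HBR,  2.70 Gbps */
	LINK_RATE_HIGH2 = 0x14, /* HBR2, 5.40 Gbps */
	LINK_RATE_HIGH3 = 0x1E, /* HBR3, 8.10 Gbps */
};

static uint32_t sym_clk_khz(uint32_t link_rate_code)
{
	return link_rate_code * LINK_RATE_REF_FREQ_IN_KHZ;
}

int main(void)
{
	printf("RBR : %u kHz\n", sym_clk_khz(LINK_RATE_LOW));   /* 162000 */
	printf("HBR : %u kHz\n", sym_clk_khz(LINK_RATE_HIGH));  /* 270000 */
	printf("HBR2: %u kHz\n", sym_clk_khz(LINK_RATE_HIGH2)); /* 540000 */
	printf("HBR3: %u kHz\n", sym_clk_khz(LINK_RATE_HIGH3)); /* 810000 */
	return 0;
}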
@@ -1722,7 +1707,7 @@ static void write_i2c_retimer_setting( | |||
1722 | i2c_success = i2c_write(pipe_ctx, slave_address, | 1707 | i2c_success = i2c_write(pipe_ctx, slave_address, |
1723 | buffer, sizeof(buffer)); | 1708 | buffer, sizeof(buffer)); |
1724 | RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ | 1709 | RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ |
1725 | offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n", | 1710 | offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", |
1726 | slave_address, buffer[0], buffer[1], i2c_success?1:0); | 1711 | slave_address, buffer[0], buffer[1], i2c_success?1:0); |
1727 | if (!i2c_success) | 1712 | if (!i2c_success) |
1728 | /* Write failure */ | 1713 | /* Write failure */ |
@@ -1734,7 +1719,7 @@ static void write_i2c_retimer_setting( | |||
1734 | i2c_success = i2c_write(pipe_ctx, slave_address, | 1719 | i2c_success = i2c_write(pipe_ctx, slave_address, |
1735 | buffer, sizeof(buffer)); | 1720 | buffer, sizeof(buffer)); |
1736 | RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ | 1721 | RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ |
1737 | offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n", | 1722 | offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", |
1738 | slave_address, buffer[0], buffer[1], i2c_success?1:0); | 1723 | slave_address, buffer[0], buffer[1], i2c_success?1:0); |
1739 | if (!i2c_success) | 1724 | if (!i2c_success) |
1740 | /* Write failure */ | 1725 | /* Write failure */ |
@@ -2156,14 +2141,16 @@ int dc_link_get_backlight_level(const struct dc_link *link) | |||
2156 | { | 2141 | { |
2157 | struct abm *abm = link->ctx->dc->res_pool->abm; | 2142 | struct abm *abm = link->ctx->dc->res_pool->abm; |
2158 | 2143 | ||
2159 | if (abm == NULL || abm->funcs->get_current_backlight_8_bit == NULL) | 2144 | if (abm == NULL || abm->funcs->get_current_backlight == NULL) |
2160 | return DC_ERROR_UNEXPECTED; | 2145 | return DC_ERROR_UNEXPECTED; |
2161 | 2146 | ||
2162 | return (int) abm->funcs->get_current_backlight_8_bit(abm); | 2147 | return (int) abm->funcs->get_current_backlight(abm); |
2163 | } | 2148 | } |
2164 | 2149 | ||
2165 | bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level, | 2150 | bool dc_link_set_backlight_level(const struct dc_link *link, |
2166 | uint32_t frame_ramp, const struct dc_stream_state *stream) | 2151 | uint32_t backlight_pwm_u16_16, |
2152 | uint32_t frame_ramp, | ||
2153 | const struct dc_stream_state *stream) | ||
2167 | { | 2154 | { |
2168 | struct dc *core_dc = link->ctx->dc; | 2155 | struct dc *core_dc = link->ctx->dc; |
2169 | struct abm *abm = core_dc->res_pool->abm; | 2156 | struct abm *abm = core_dc->res_pool->abm; |
@@ -2175,19 +2162,17 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level, | |||
2175 | 2162 | ||
2176 | if ((dmcu == NULL) || | 2163 | if ((dmcu == NULL) || |
2177 | (abm == NULL) || | 2164 | (abm == NULL) || |
2178 | (abm->funcs->set_backlight_level == NULL)) | 2165 | (abm->funcs->set_backlight_level_pwm == NULL)) |
2179 | return false; | 2166 | return false; |
2180 | 2167 | ||
2181 | if (stream) { | 2168 | if (stream) |
2182 | if (stream->bl_pwm_level == EDP_BACKLIGHT_RAMP_DISABLE_LEVEL) | 2169 | ((struct dc_stream_state *)stream)->bl_pwm_level = |
2183 | frame_ramp = 0; | 2170 | backlight_pwm_u16_16; |
2184 | |||
2185 | ((struct dc_stream_state *)stream)->bl_pwm_level = level; | ||
2186 | } | ||
2187 | 2171 | ||
2188 | use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); | 2172 | use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); |
2189 | 2173 | ||
2190 | DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", level, level); | 2174 | DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", |
2175 | backlight_pwm_u16_16, backlight_pwm_u16_16); | ||
2191 | 2176 | ||
2192 | if (dc_is_embedded_signal(link->connector_signal)) { | 2177 | if (dc_is_embedded_signal(link->connector_signal)) { |
2193 | if (stream != NULL) { | 2178 | if (stream != NULL) { |
@@ -2204,9 +2189,9 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level, | |||
2204 | 1; | 2189 | 1; |
2205 | } | 2190 | } |
2206 | } | 2191 | } |
2207 | abm->funcs->set_backlight_level( | 2192 | abm->funcs->set_backlight_level_pwm( |
2208 | abm, | 2193 | abm, |
2209 | level, | 2194 | backlight_pwm_u16_16, |
2210 | frame_ramp, | 2195 | frame_ramp, |
2211 | controller_id, | 2196 | controller_id, |
2212 | use_smooth_brightness); | 2197 | use_smooth_brightness); |
@@ -2220,7 +2205,7 @@ bool dc_link_set_abm_disable(const struct dc_link *link) | |||
2220 | struct dc *core_dc = link->ctx->dc; | 2205 | struct dc *core_dc = link->ctx->dc; |
2221 | struct abm *abm = core_dc->res_pool->abm; | 2206 | struct abm *abm = core_dc->res_pool->abm; |
2222 | 2207 | ||
2223 | if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL)) | 2208 | if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL)) |
2224 | return false; | 2209 | return false; |
2225 | 2210 | ||
2226 | abm->funcs->set_abm_immediate_disable(abm); | 2211 | abm->funcs->set_abm_immediate_disable(abm); |
@@ -2609,6 +2594,10 @@ void core_link_enable_stream( | |||
2609 | core_dc->hwss.unblank_stream(pipe_ctx, | 2594 | core_dc->hwss.unblank_stream(pipe_ctx, |
2610 | &pipe_ctx->stream->sink->link->cur_link_settings); | 2595 | &pipe_ctx->stream->sink->link->cur_link_settings); |
2611 | 2596 | ||
2597 | dc_link_set_backlight_level(pipe_ctx->stream->sink->link, | ||
2598 | pipe_ctx->stream->bl_pwm_level, | ||
2599 | 0, | ||
2600 | pipe_ctx->stream); | ||
2612 | } | 2601 | } |
2613 | 2602 | ||
2614 | } | 2603 | } |
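core_link_enable_stream() now restores the cached stream->bl_pwm_level through dc_link_set_backlight_level(), whose level argument has become an unsigned 16.16 fixed-point fraction of full brightness (1.0 = 0x10000, per the dc_link.h comment below). A hedged helper showing how an 8-bit user level maps into that format; the 0x10101 scaling mirrors the code removed from dce_abm.c further down, and the function name is invented:

#include <stdint.h>
#include <stdio.h>

/* convert an 8-bit level to u16.16: multiply by 0x10101 (roughly a
 * 256/255 scale-up to 24 bits), then round the low byte away so that
 * 0xFF lands exactly on 0x10000 (1.0) */
static uint32_t level_8bit_to_u16_16(uint8_t level)
{
	uint32_t v24 = (uint32_t)level * 0x10101;
	uint32_t round = (v24 >> 7) & 1; /* MSB of the discarded fraction */

	return (v24 >> 8) + round;
}

int main(void)
{
	printf("0x%05X\n", level_8bit_to_u16_16(0xFF)); /* 0x10000 = 1.0 */
	printf("0x%05X\n", level_8bit_to_u16_16(0x80)); /* 0x08081 ~ 0.502 */
	return 0;
}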
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index b6fe29b9fb65..fc65b0055167 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c | |||
@@ -499,8 +499,13 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) | |||
499 | pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; | 499 | pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state; |
500 | bool flip_vert_scan_dir = false, flip_horz_scan_dir = false; | 500 | bool flip_vert_scan_dir = false, flip_horz_scan_dir = false; |
501 | 501 | ||
502 | |||
502 | /* | 503 | /* |
503 | * Need to calculate the scan direction for viewport to properly determine offset | 504 | * We need to take horizontal mirror into account. On an unrotated surface this means |
505 | * that the viewport offset is actually the offset from the other side of source | ||
506 | * image so we have to subtract the right edge of the viewport from the right edge of | ||
507 | * the source window. Similar to mirror we need to take into account how offset is | ||
508 | * affected for 270/180 rotations | ||
504 | */ | 509 | */ |
505 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) { | 510 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) { |
506 | flip_vert_scan_dir = true; | 511 | flip_vert_scan_dir = true; |
@@ -510,6 +515,9 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) | |||
510 | else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) | 515 | else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) |
511 | flip_horz_scan_dir = true; | 516 | flip_horz_scan_dir = true; |
512 | 517 | ||
518 | if (pipe_ctx->plane_state->horizontal_mirror) | ||
519 | flip_horz_scan_dir = !flip_horz_scan_dir; | ||
520 | |||
513 | if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE || | 521 | if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE || |
514 | stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) { | 522 | stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) { |
515 | pri_split = false; | 523 | pri_split = false; |
@@ -540,45 +548,27 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) | |||
540 | plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ; | 548 | plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ; |
541 | 549 | ||
542 | /* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio | 550 | /* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio |
551 | * note: surf_src.ofs should be added after rotation/mirror offset direction | ||
552 | * adjustment since it is already in viewport space | ||
543 | * num_pixels = clip.num_pix * scl_ratio | 553 | * num_pixels = clip.num_pix * scl_ratio |
544 | */ | 554 | */ |
545 | data->viewport.x = surf_src.x + (clip.x - plane_state->dst_rect.x) * | 555 | data->viewport.x = (clip.x - plane_state->dst_rect.x) * |
546 | surf_src.width / plane_state->dst_rect.width; | 556 | surf_src.width / plane_state->dst_rect.width; |
547 | data->viewport.width = clip.width * | 557 | data->viewport.width = clip.width * |
548 | surf_src.width / plane_state->dst_rect.width; | 558 | surf_src.width / plane_state->dst_rect.width; |
549 | 559 | ||
550 | data->viewport.y = surf_src.y + (clip.y - plane_state->dst_rect.y) * | 560 | data->viewport.y = (clip.y - plane_state->dst_rect.y) * |
551 | surf_src.height / plane_state->dst_rect.height; | 561 | surf_src.height / plane_state->dst_rect.height; |
552 | data->viewport.height = clip.height * | 562 | data->viewport.height = clip.height * |
553 | surf_src.height / plane_state->dst_rect.height; | 563 | surf_src.height / plane_state->dst_rect.height; |
554 | 564 | ||
555 | /* To transfer the x, y to correct coordinate on mirror image (camera). | 565 | if (flip_vert_scan_dir) |
556 | * deg 0 : transfer x, | 566 | data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height; |
557 | * deg 90 : don't need to transfer, | 567 | if (flip_horz_scan_dir) |
558 | * deg180 : transfer y, | 568 | data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width; |
559 | * deg270 : transfer x and y. | 569 | |
560 | * To transfer the x, y to correct coordinate on non-mirror image (video). | 570 | data->viewport.x += surf_src.x; |
561 | * deg 0 : don't need to transfer, | 571 | data->viewport.y += surf_src.y; |
562 | * deg 90 : transfer y, | ||
563 | * deg180 : transfer x and y, | ||
564 | * deg270 : transfer x. | ||
565 | */ | ||
566 | if (pipe_ctx->plane_state->horizontal_mirror) { | ||
567 | if (flip_horz_scan_dir && !flip_vert_scan_dir) { | ||
568 | data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height; | ||
569 | data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width; | ||
570 | } else if (flip_horz_scan_dir && flip_vert_scan_dir) | ||
571 | data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height; | ||
572 | else { | ||
573 | if (!flip_horz_scan_dir && !flip_vert_scan_dir) | ||
574 | data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width; | ||
575 | } | ||
576 | } else { | ||
577 | if (flip_horz_scan_dir) | ||
578 | data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width; | ||
579 | if (flip_vert_scan_dir) | ||
580 | data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height; | ||
581 | } | ||
582 | 572 | ||
583 | /* Round down, compensate in init */ | 573 | /* Round down, compensate in init */ |
584 | data->viewport_c.x = data->viewport.x / vpc_div; | 574 | data->viewport_c.x = data->viewport.x / vpc_div; |
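The rewritten viewport math computes offsets in unflipped space first, reflects once per flipped axis with x' = src_width - x - width, and only then adds surf_src's own offset, which is already in viewport space. A small standalone check of that reflection, using made-up values:

#include <stdio.h>

struct rect { int x, y, width, height; };

/* reflect a viewport horizontally inside a source of width src_w,
 * matching: viewport.x = surf_src.width - viewport.x - viewport.width */
static void flip_horz(struct rect *vp, int src_w)
{
	vp->x = src_w - vp->x - vp->width;
}

int main(void)
{
	struct rect vp = { .x = 100, .y = 0, .width = 300, .height = 200 };

	flip_horz(&vp, 1920);
	/* a 300px window 100px from the left edge ends up 100px from the
	 * right edge: 1920 - 100 - 300 = 1520 */
	printf("x=%d\n", vp.x); /* 1520 */
	return 0;
}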
@@ -773,22 +763,15 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *r | |||
773 | else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) | 763 | else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) |
774 | flip_horz_scan_dir = true; | 764 | flip_horz_scan_dir = true; |
775 | 765 | ||
766 | if (pipe_ctx->plane_state->horizontal_mirror) | ||
767 | flip_horz_scan_dir = !flip_horz_scan_dir; | ||
768 | |||
776 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || | 769 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || |
777 | pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) { | 770 | pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) { |
778 | rect_swap_helper(&src); | 771 | rect_swap_helper(&src); |
779 | rect_swap_helper(&data->viewport_c); | 772 | rect_swap_helper(&data->viewport_c); |
780 | rect_swap_helper(&data->viewport); | 773 | rect_swap_helper(&data->viewport); |
781 | 774 | } | |
782 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270 && | ||
783 | pipe_ctx->plane_state->horizontal_mirror) { | ||
784 | flip_vert_scan_dir = true; | ||
785 | } | ||
786 | if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 && | ||
787 | pipe_ctx->plane_state->horizontal_mirror) { | ||
788 | flip_vert_scan_dir = false; | ||
789 | } | ||
790 | } else if (pipe_ctx->plane_state->horizontal_mirror) | ||
791 | flip_horz_scan_dir = !flip_horz_scan_dir; | ||
792 | 775 | ||
793 | /* | 776 | /* |
794 | * Init calculated according to formula: | 777 | * Init calculated according to formula: |
@@ -1115,9 +1098,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) | |||
1115 | pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( | 1098 | pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( |
1116 | pipe_ctx->plane_state->format); | 1099 | pipe_ctx->plane_state->format); |
1117 | 1100 | ||
1118 | if (pipe_ctx->stream->timing.flags.INTERLACE) | ||
1119 | pipe_ctx->stream->dst.height *= 2; | ||
1120 | |||
1121 | calculate_scaling_ratios(pipe_ctx); | 1101 | calculate_scaling_ratios(pipe_ctx); |
1122 | 1102 | ||
1123 | calculate_viewport(pipe_ctx); | 1103 | calculate_viewport(pipe_ctx); |
@@ -1138,9 +1118,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) | |||
1138 | 1118 | ||
1139 | pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right; | 1119 | pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right; |
1140 | pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; | 1120 | pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; |
1141 | if (pipe_ctx->stream->timing.flags.INTERLACE) | ||
1142 | pipe_ctx->plane_res.scl_data.v_active *= 2; | ||
1143 | |||
1144 | 1121 | ||
1145 | /* Taps calculations */ | 1122 | /* Taps calculations */ |
1146 | if (pipe_ctx->plane_res.xfm != NULL) | 1123 | if (pipe_ctx->plane_res.xfm != NULL) |
@@ -1185,9 +1162,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) | |||
1185 | plane_state->dst_rect.x, | 1162 | plane_state->dst_rect.x, |
1186 | plane_state->dst_rect.y); | 1163 | plane_state->dst_rect.y); |
1187 | 1164 | ||
1188 | if (pipe_ctx->stream->timing.flags.INTERLACE) | ||
1189 | pipe_ctx->stream->dst.height /= 2; | ||
1190 | |||
1191 | return res; | 1165 | return res; |
1192 | } | 1166 | } |
1193 | 1167 | ||
@@ -2071,7 +2045,7 @@ void dc_resource_state_construct( | |||
2071 | const struct dc *dc, | 2045 | const struct dc *dc, |
2072 | struct dc_state *dst_ctx) | 2046 | struct dc_state *dst_ctx) |
2073 | { | 2047 | { |
2074 | dst_ctx->dis_clk = dc->res_pool->dccg; | 2048 | dst_ctx->dccg = dc->res_pool->clk_mgr; |
2075 | } | 2049 | } |
2076 | 2050 | ||
2077 | enum dc_status dc_validate_global_state( | 2051 | enum dc_status dc_validate_global_state( |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 2ac848a106ba..e113439aaa86 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c | |||
@@ -106,6 +106,7 @@ static void construct(struct dc_stream_state *stream, | |||
106 | 106 | ||
107 | stream->out_transfer_func = dc_create_transfer_func(); | 107 | stream->out_transfer_func = dc_create_transfer_func(); |
108 | stream->out_transfer_func->type = TF_TYPE_BYPASS; | 108 | stream->out_transfer_func->type = TF_TYPE_BYPASS; |
109 | stream->out_transfer_func->ctx = stream->ctx; | ||
109 | } | 110 | } |
110 | 111 | ||
111 | static void destruct(struct dc_stream_state *stream) | 112 | static void destruct(struct dc_stream_state *stream) |
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index 8fb3aefd195c..c60c9b4c3075 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c | |||
@@ -44,6 +44,7 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state | |||
44 | 44 | ||
45 | plane_state->in_transfer_func = dc_create_transfer_func(); | 45 | plane_state->in_transfer_func = dc_create_transfer_func(); |
46 | plane_state->in_transfer_func->type = TF_TYPE_BYPASS; | 46 | plane_state->in_transfer_func->type = TF_TYPE_BYPASS; |
47 | plane_state->in_transfer_func->ctx = ctx; | ||
47 | } | 48 | } |
48 | 49 | ||
49 | static void destruct(struct dc_plane_state *plane_state) | 50 | static void destruct(struct dc_plane_state *plane_state) |
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 199527171100..d16a20c84792 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h | |||
@@ -38,7 +38,7 @@ | |||
38 | #include "inc/compressor.h" | 38 | #include "inc/compressor.h" |
39 | #include "dml/display_mode_lib.h" | 39 | #include "dml/display_mode_lib.h" |
40 | 40 | ||
41 | #define DC_VER "3.1.68" | 41 | #define DC_VER "3.2.04" |
42 | 42 | ||
43 | #define MAX_SURFACES 3 | 43 | #define MAX_SURFACES 3 |
44 | #define MAX_STREAMS 6 | 44 | #define MAX_STREAMS 6 |
@@ -169,6 +169,7 @@ struct link_training_settings; | |||
169 | struct dc_config { | 169 | struct dc_config { |
170 | bool gpu_vm_support; | 170 | bool gpu_vm_support; |
171 | bool disable_disp_pll_sharing; | 171 | bool disable_disp_pll_sharing; |
172 | bool fbc_support; | ||
172 | }; | 173 | }; |
173 | 174 | ||
174 | enum visual_confirm { | 175 | enum visual_confirm { |
@@ -249,8 +250,6 @@ struct dc_debug_options { | |||
249 | bool disable_dmcu; | 250 | bool disable_dmcu; |
250 | bool disable_psr; | 251 | bool disable_psr; |
251 | bool force_abm_enable; | 252 | bool force_abm_enable; |
252 | bool disable_hbup_pg; | ||
253 | bool disable_dpp_pg; | ||
254 | bool disable_stereo_support; | 253 | bool disable_stereo_support; |
255 | bool vsr_support; | 254 | bool vsr_support; |
256 | bool performance_trace; | 255 | bool performance_trace; |
@@ -304,11 +303,6 @@ struct dc { | |||
304 | struct hw_sequencer_funcs hwss; | 303 | struct hw_sequencer_funcs hwss; |
305 | struct dce_hwseq *hwseq; | 304 | struct dce_hwseq *hwseq; |
306 | 305 | ||
307 | /* temp store of dm_pp_display_configuration | ||
308 | * to compare to see if display config changed | ||
309 | */ | ||
310 | struct dm_pp_display_configuration prev_display_config; | ||
311 | |||
312 | bool optimized_required; | 306 | bool optimized_required; |
313 | 307 | ||
314 | /* FBC compressor */ | 308 | /* FBC compressor */ |
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h index 8130b95ccc53..a8b3cedf9431 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h | |||
@@ -86,6 +86,10 @@ struct dc_vbios_funcs { | |||
86 | 86 | ||
87 | bool (*is_accelerated_mode)( | 87 | bool (*is_accelerated_mode)( |
88 | struct dc_bios *bios); | 88 | struct dc_bios *bios); |
89 | bool (*is_active_display)( | ||
90 | struct dc_bios *bios, | ||
91 | enum signal_type signal, | ||
92 | const struct connector_device_tag_info *device_tag); | ||
89 | void (*set_scratch_critical_state)( | 93 | void (*set_scratch_critical_state)( |
90 | struct dc_bios *bios, | 94 | struct dc_bios *bios, |
91 | bool state); | 95 | bool state); |
@@ -141,6 +145,7 @@ struct dc_vbios_funcs { | |||
141 | }; | 145 | }; |
142 | 146 | ||
143 | struct bios_registers { | 147 | struct bios_registers { |
148 | uint32_t BIOS_SCRATCH_0; | ||
144 | uint32_t BIOS_SCRATCH_3; | 149 | uint32_t BIOS_SCRATCH_3; |
145 | uint32_t BIOS_SCRATCH_6; | 150 | uint32_t BIOS_SCRATCH_6; |
146 | }; | 151 | }; |
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 3bfdccceb524..8738f27a8708 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h | |||
@@ -138,9 +138,14 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_ | |||
138 | return dc->links[link_index]; | 138 | return dc->links[link_index]; |
139 | } | 139 | } |
140 | 140 | ||
141 | /* Set backlight level of an embedded panel (eDP, LVDS). */ | 141 | /* Set backlight level of an embedded panel (eDP, LVDS). |
142 | bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level, | 142 | * backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer |
143 | uint32_t frame_ramp, const struct dc_stream_state *stream); | 143 | * and 16 bit fractional, where 1.0 is max backlight value. |
144 | */ | ||
145 | bool dc_link_set_backlight_level(const struct dc_link *dc_link, | ||
146 | uint32_t backlight_pwm_u16_16, | ||
147 | uint32_t frame_ramp, | ||
148 | const struct dc_stream_state *stream); | ||
144 | 149 | ||
145 | int dc_link_get_backlight_level(const struct dc_link *dc_link); | 150 | int dc_link_get_backlight_level(const struct dc_link *dc_link); |
146 | 151 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile index 8f7f0e8b341f..6d7b64a743ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile | |||
@@ -28,7 +28,7 @@ | |||
28 | 28 | ||
29 | DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \ | 29 | DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \ |
30 | dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \ | 30 | dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \ |
31 | dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \ | 31 | dce_clk_mgr.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \ |
32 | dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o | 32 | dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o |
33 | 33 | ||
34 | AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE)) | 34 | AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE)) |
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index 29294db1a96b..2a342eae80fd 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | |||
@@ -54,7 +54,7 @@ | |||
54 | #define MCP_DISABLE_ABM_IMMEDIATELY 255 | 54 | #define MCP_DISABLE_ABM_IMMEDIATELY 255 |
55 | 55 | ||
56 | 56 | ||
57 | static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce) | 57 | static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *abm_dce) |
58 | { | 58 | { |
59 | uint64_t current_backlight; | 59 | uint64_t current_backlight; |
60 | uint32_t round_result; | 60 | uint32_t round_result; |
@@ -103,45 +103,21 @@ static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce) | |||
103 | return (uint32_t)(current_backlight); | 103 | return (uint32_t)(current_backlight); |
104 | } | 104 | } |
105 | 105 | ||
106 | static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level) | 106 | static void driver_set_backlight_level(struct dce_abm *abm_dce, |
107 | uint32_t backlight_pwm_u16_16) | ||
107 | { | 108 | { |
108 | uint32_t backlight_24bit; | ||
109 | uint32_t backlight_17bit; | ||
110 | uint32_t backlight_16bit; | 109 | uint32_t backlight_16bit; |
111 | uint32_t masked_pwm_period; | 110 | uint32_t masked_pwm_period; |
112 | uint8_t rounding_bit; | ||
113 | uint8_t bit_count; | 111 | uint8_t bit_count; |
114 | uint64_t active_duty_cycle; | 112 | uint64_t active_duty_cycle; |
115 | uint32_t pwm_period_bitcnt; | 113 | uint32_t pwm_period_bitcnt; |
116 | 114 | ||
117 | /* | 115 | /* |
118 | * 1. Convert 8-bit value to 17 bit U1.16 format | 116 | * 1. Find 16 bit backlight active duty cycle, where 0 <= backlight |
119 | * (1 integer, 16 fractional bits) | ||
120 | */ | ||
121 | |||
122 | /* 1.1 multiply 8 bit value by 0x10101 to get a 24 bit value, | ||
123 | * effectively multiplying value by 256/255 | ||
124 | * eg. for a level of 0xEF, backlight_24bit = 0xEF * 0x10101 = 0xEFEFEF | ||
125 | */ | ||
126 | backlight_24bit = level * 0x10101; | ||
127 | |||
128 | /* 1.2 The upper 16 bits of the 24 bit value is the fraction, lower 8 | ||
129 | * used for rounding, take most significant bit of fraction for | ||
130 | * rounding, e.g. for 0xEFEFEF, rounding bit is 1 | ||
131 | */ | ||
132 | rounding_bit = (backlight_24bit >> 7) & 1; | ||
133 | |||
134 | /* 1.3 Add the upper 16 bits of the 24 bit value with the rounding bit | ||
135 | * resulting in a 17 bit value e.g. 0xEFF0 = (0xEFEFEF >> 8) + 1 | ||
136 | */ | ||
137 | backlight_17bit = (backlight_24bit >> 8) + rounding_bit; | ||
138 | |||
139 | /* | ||
140 | * 2. Find 16 bit backlight active duty cycle, where 0 <= backlight | ||
141 | * active duty cycle <= backlight period | 117 | * active duty cycle <= backlight period |
142 | */ | 118 | */ |
143 | 119 | ||
144 | /* 2.1 Apply bitmask for backlight period value based on value of BITCNT | 120 | /* 1.1 Apply bitmask for backlight period value based on value of BITCNT |
145 | */ | 121 | */ |
146 | REG_GET_2(BL_PWM_PERIOD_CNTL, | 122 | REG_GET_2(BL_PWM_PERIOD_CNTL, |
147 | BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt, | 123 | BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt, |
@@ -155,13 +131,13 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level) | |||
155 | /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */ | 131 | /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */ |
156 | masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1); | 132 | masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1); |
157 | 133 | ||
158 | /* 2.2 Calculate integer active duty cycle required upper 16 bits | 134 | /* 1.2 Calculate integer active duty cycle required upper 16 bits |
159 | * contain integer component, lower 16 bits contain fractional component | 135 | * contain integer component, lower 16 bits contain fractional component |
160 | * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24 | 136 | * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24 |
161 | */ | 137 | */ |
162 | active_duty_cycle = backlight_17bit * masked_pwm_period; | 138 | active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period; |
163 | 139 | ||
164 | /* 2.3 Calculate 16 bit active duty cycle from integer and fractional | 140 | /* 1.3 Calculate 16 bit active duty cycle from integer and fractional |
165 | * components shift by bitCount then mask 16 bits and add rounding bit | 141 | * components shift by bitCount then mask 16 bits and add rounding bit |
166 | * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0 | 142 | * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0 |
167 | */ | 143 | */ |
@@ -170,23 +146,23 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level) | |||
170 | backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1; | 146 | backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1; |
171 | 147 | ||
172 | /* | 148 | /* |
173 | * 3. Program register with updated value | 149 | * 2. Program register with updated value |
174 | */ | 150 | */ |
175 | 151 | ||
176 | /* 3.1 Lock group 2 backlight registers */ | 152 | /* 2.1 Lock group 2 backlight registers */ |
177 | 153 | ||
178 | REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK, | 154 | REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK, |
179 | BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1, | 155 | BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1, |
180 | BL_PWM_GRP1_REG_LOCK, 1); | 156 | BL_PWM_GRP1_REG_LOCK, 1); |
181 | 157 | ||
182 | // 3.2 Write new active duty cycle | 158 | // 2.2 Write new active duty cycle |
183 | REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit); | 159 | REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit); |
184 | 160 | ||
185 | /* 3.3 Unlock group 2 backlight registers */ | 161 | /* 2.3 Unlock group 2 backlight registers */ |
186 | REG_UPDATE(BL_PWM_GRP1_REG_LOCK, | 162 | REG_UPDATE(BL_PWM_GRP1_REG_LOCK, |
187 | BL_PWM_GRP1_REG_LOCK, 0); | 163 | BL_PWM_GRP1_REG_LOCK, 0); |
188 | 164 | ||
189 | /* 5.4.4 Wait for pending bit to be cleared */ | 165 | /* 3 Wait for pending bit to be cleared */ |
190 | REG_WAIT(BL_PWM_GRP1_REG_LOCK, | 166 | REG_WAIT(BL_PWM_GRP1_REG_LOCK, |
191 | BL_PWM_GRP1_REG_UPDATE_PENDING, 0, | 167 | BL_PWM_GRP1_REG_UPDATE_PENDING, 0, |
192 | 1, 10000); | 168 | 1, 10000); |
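For reference, the U16.16 duty-cycle math in steps 1.1-1.3 above can be modelled in plain C. This is a minimal standalone sketch, not the driver code: the register reads are replaced by plain parameters, and the function name is illustrative.

    #include <stdint.h>

    /* Sketch of the U16.16 backlight -> PWM duty-cycle conversion above.
     * pwm_period is the raw BL_PWM_PERIOD value and bit_count the BITCNT
     * field (assumed 1..16 here). */
    static uint32_t duty_cycle_from_u16_16(uint32_t backlight_pwm_u16_16,
                                           uint32_t pwm_period,
                                           uint32_t bit_count)
    {
            /* 1.1 mask the period down to bit_count bits,
             * e.g. 0x24 when bit_count is 6 */
            uint32_t masked_pwm_period = pwm_period & ((1u << bit_count) - 1);

            /* 1.2 upper 16 bits hold the integer part, lower 16 the fraction,
             * e.g. 0x21BDC0 = 0xEFF0 * 0x24 */
            uint64_t active_duty_cycle =
                    (uint64_t)backlight_pwm_u16_16 * masked_pwm_period;

            /* 1.3 drop bit_count fractional bits, keep 16 bits, then round
             * with the MSB of the discarded fraction,
             * e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFFF) + 0 */
            uint32_t backlight_16bit =
                    (uint32_t)(active_duty_cycle >> bit_count) & 0xFFFF;
            backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;

            return backlight_16bit;
    }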
@@ -194,16 +170,21 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level) | |||
194 | 170 | ||
195 | static void dmcu_set_backlight_level( | 171 | static void dmcu_set_backlight_level( |
196 | struct dce_abm *abm_dce, | 172 | struct dce_abm *abm_dce, |
197 | uint32_t level, | 173 | uint32_t backlight_pwm_u16_16, |
198 | uint32_t frame_ramp, | 174 | uint32_t frame_ramp, |
199 | uint32_t controller_id) | 175 | uint32_t controller_id) |
200 | { | 176 | { |
201 | unsigned int backlight_16_bit = (level * 0x10101) >> 8; | 177 | unsigned int backlight_8_bit = 0; |
202 | unsigned int backlight_17_bit = backlight_16_bit + | ||
203 | (((backlight_16_bit & 0x80) >> 7) & 1); | ||
204 | uint32_t rampingBoundary = 0xFFFF; | 178 | uint32_t rampingBoundary = 0xFFFF; |
205 | uint32_t s2; | 179 | uint32_t s2; |
206 | 180 | ||
181 | if (backlight_pwm_u16_16 & 0x10000) | ||
182 | // Check for max backlight condition | ||
183 | backlight_8_bit = 0xFF; | ||
184 | else | ||
185 | // Take MSB of fractional part since backlight is not max | ||
186 | backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF; | ||
187 | |||
207 | /* set ramping boundary */ | 188 | /* set ramping boundary */ |
208 | REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary); | 189 | REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary); |
209 | 190 | ||
@@ -220,7 +201,7 @@ static void dmcu_set_backlight_level( | |||
220 | 0, 1, 80000); | 201 | 0, 1, 80000); |
221 | 202 | ||
222 | /* setDMCUParam_BL */ | 203 | /* setDMCUParam_BL */ |
223 | REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_17_bit); | 204 | REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_pwm_u16_16); |
224 | 205 | ||
225 | /* write ramp */ | 206 | /* write ramp */ |
226 | if (controller_id == 0) | 207 | if (controller_id == 0) |
@@ -237,9 +218,9 @@ static void dmcu_set_backlight_level( | |||
237 | s2 = REG_READ(BIOS_SCRATCH_2); | 218 | s2 = REG_READ(BIOS_SCRATCH_2); |
238 | 219 | ||
239 | s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK; | 220 | s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK; |
240 | level &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >> | 221 | backlight_8_bit &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >> |
241 | ATOM_S2_CURRENT_BL_LEVEL_SHIFT); | 222 | ATOM_S2_CURRENT_BL_LEVEL_SHIFT); |
242 | s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT); | 223 | s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT); |
243 | 224 | ||
244 | REG_WRITE(BIOS_SCRATCH_2, s2); | 225 | REG_WRITE(BIOS_SCRATCH_2, s2); |
245 | } | 226 | } |
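The 8-bit value written into BIOS_SCRATCH_2 above is derived from the same U16.16 format. A minimal sketch of that conversion (the function name is illustrative, not driver code):

    #include <stdint.h>

    /* 0x10000 is the only U16.16 value with the integer bit set (maximum
     * brightness); otherwise the top 8 fractional bits give the 8-bit level. */
    static uint8_t backlight_8_bit_from_u16_16(uint32_t backlight_pwm_u16_16)
    {
            if (backlight_pwm_u16_16 & 0x10000)
                    return 0xFF;                          /* max backlight */
            return (backlight_pwm_u16_16 >> 8) & 0xFF;    /* MSB of fraction */
    }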
@@ -247,7 +228,7 @@ static void dmcu_set_backlight_level( | |||
247 | static void dce_abm_init(struct abm *abm) | 228 | static void dce_abm_init(struct abm *abm) |
248 | { | 229 | { |
249 | struct dce_abm *abm_dce = TO_DCE_ABM(abm); | 230 | struct dce_abm *abm_dce = TO_DCE_ABM(abm); |
250 | unsigned int backlight = get_current_backlight_16_bit(abm_dce); | 231 | unsigned int backlight = calculate_16_bit_backlight_from_pwm(abm_dce); |
251 | 232 | ||
252 | REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103); | 233 | REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103); |
253 | REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101); | 234 | REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101); |
@@ -284,12 +265,26 @@ static void dce_abm_init(struct abm *abm) | |||
284 | ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1); | 265 | ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1); |
285 | } | 266 | } |
286 | 267 | ||
287 | static unsigned int dce_abm_get_current_backlight_8_bit(struct abm *abm) | 268 | static unsigned int dce_abm_get_current_backlight(struct abm *abm) |
288 | { | 269 | { |
289 | struct dce_abm *abm_dce = TO_DCE_ABM(abm); | 270 | struct dce_abm *abm_dce = TO_DCE_ABM(abm); |
290 | unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL); | 271 | unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL); |
291 | 272 | ||
292 | return (backlight >> 8); | 273 | /* return backlight in hardware format which is unsigned 17 bits, with |
274 | * 1 bit integer and 16 bit fractional | ||
275 | */ | ||
276 | return backlight; | ||
277 | } | ||
278 | |||
279 | static unsigned int dce_abm_get_target_backlight(struct abm *abm) | ||
280 | { | ||
281 | struct dce_abm *abm_dce = TO_DCE_ABM(abm); | ||
282 | unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL); | ||
283 | |||
284 | /* return backlight in hardware format which is unsigned 17 bits, with | ||
285 | * 1 bit integer and 16 bit fractional | ||
286 | */ | ||
287 | return backlight; | ||
293 | } | 288 | } |
294 | 289 | ||
295 | static bool dce_abm_set_level(struct abm *abm, uint32_t level) | 290 | static bool dce_abm_set_level(struct abm *abm, uint32_t level) |
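Both getters now return the raw hardware U1.16 value rather than a pre-shifted 8-bit level. A hypothetical caller-side conversion to a percentage, for illustration only:

    #include <stdint.h>

    /* 0x10000 (integer bit set) maps to 100%; pure fractions scale linearly. */
    static unsigned int backlight_u16_16_to_percent(unsigned int bl)
    {
            return (unsigned int)(((uint64_t)bl * 100) >> 16);
    }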
@@ -396,9 +391,9 @@ static bool dce_abm_init_backlight(struct abm *abm) | |||
396 | return true; | 391 | return true; |
397 | } | 392 | } |
398 | 393 | ||
399 | static bool dce_abm_set_backlight_level( | 394 | static bool dce_abm_set_backlight_level_pwm( |
400 | struct abm *abm, | 395 | struct abm *abm, |
401 | unsigned int backlight_level, | 396 | unsigned int backlight_pwm_u16_16, |
402 | unsigned int frame_ramp, | 397 | unsigned int frame_ramp, |
403 | unsigned int controller_id, | 398 | unsigned int controller_id, |
404 | bool use_smooth_brightness) | 399 | bool use_smooth_brightness) |
@@ -406,16 +401,16 @@ static bool dce_abm_set_backlight_level( | |||
406 | struct dce_abm *abm_dce = TO_DCE_ABM(abm); | 401 | struct dce_abm *abm_dce = TO_DCE_ABM(abm); |
407 | 402 | ||
408 | DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", | 403 | DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", |
409 | backlight_level, backlight_level); | 404 | backlight_pwm_u16_16, backlight_pwm_u16_16); |
410 | 405 | ||
411 | /* If DMCU is in reset state, DMCU is uninitialized */ | 406 | /* If DMCU is in reset state, DMCU is uninitialized */ |
412 | if (use_smooth_brightness) | 407 | if (use_smooth_brightness) |
413 | dmcu_set_backlight_level(abm_dce, | 408 | dmcu_set_backlight_level(abm_dce, |
414 | backlight_level, | 409 | backlight_pwm_u16_16, |
415 | frame_ramp, | 410 | frame_ramp, |
416 | controller_id); | 411 | controller_id); |
417 | else | 412 | else |
418 | driver_set_backlight_level(abm_dce, backlight_level); | 413 | driver_set_backlight_level(abm_dce, backlight_pwm_u16_16); |
419 | 414 | ||
420 | return true; | 415 | return true; |
421 | } | 416 | } |
@@ -424,8 +419,9 @@ static const struct abm_funcs dce_funcs = { | |||
424 | .abm_init = dce_abm_init, | 419 | .abm_init = dce_abm_init, |
425 | .set_abm_level = dce_abm_set_level, | 420 | .set_abm_level = dce_abm_set_level, |
426 | .init_backlight = dce_abm_init_backlight, | 421 | .init_backlight = dce_abm_init_backlight, |
427 | .set_backlight_level = dce_abm_set_backlight_level, | 422 | .set_backlight_level_pwm = dce_abm_set_backlight_level_pwm, |
428 | .get_current_backlight_8_bit = dce_abm_get_current_backlight_8_bit, | 423 | .get_current_backlight = dce_abm_get_current_backlight, |
424 | .get_target_backlight = dce_abm_get_target_backlight, | ||
429 | .set_abm_immediate_disable = dce_abm_immediate_disable | 425 | .set_abm_immediate_disable = dce_abm_immediate_disable |
430 | }; | 426 | }; |
431 | 427 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c new file mode 100644 index 000000000000..9a28a04417d1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c | |||
@@ -0,0 +1,879 @@ | |||
1 | /* | ||
2 | * Copyright 2012-16 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: AMD | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include "dce_clk_mgr.h" | ||
27 | |||
28 | #include "reg_helper.h" | ||
29 | #include "dmcu.h" | ||
30 | #include "core_types.h" | ||
31 | #include "dal_asic_id.h" | ||
32 | |||
33 | #define TO_DCE_CLK_MGR(clocks)\ | ||
34 | container_of(clocks, struct dce_clk_mgr, base) | ||
35 | |||
36 | #define REG(reg) \ | ||
37 | (clk_mgr_dce->regs->reg) | ||
38 | |||
39 | #undef FN | ||
40 | #define FN(reg_name, field_name) \ | ||
41 | clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name | ||
42 | |||
43 | #define CTX \ | ||
44 | clk_mgr_dce->base.ctx | ||
45 | #define DC_LOGGER \ | ||
46 | clk_mgr->ctx->logger | ||
47 | |||
48 | /* Max clock values for each state indexed by "enum clocks_state": */ | ||
49 | static const struct state_dependent_clocks dce80_max_clks_by_state[] = { | ||
50 | /* ClocksStateInvalid - should not be used */ | ||
51 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
52 | /* ClocksStateUltraLow - not expected to be used for DCE 8.0 */ | ||
53 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
54 | /* ClocksStateLow */ | ||
55 | { .display_clk_khz = 352000, .pixel_clk_khz = 330000}, | ||
56 | /* ClocksStateNominal */ | ||
57 | { .display_clk_khz = 600000, .pixel_clk_khz = 400000 }, | ||
58 | /* ClocksStatePerformance */ | ||
59 | { .display_clk_khz = 600000, .pixel_clk_khz = 400000 } }; | ||
60 | |||
61 | static const struct state_dependent_clocks dce110_max_clks_by_state[] = { | ||
62 | /*ClocksStateInvalid - should not be used*/ | ||
63 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
64 | /*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ | ||
65 | { .display_clk_khz = 352000, .pixel_clk_khz = 330000 }, | ||
66 | /*ClocksStateLow*/ | ||
67 | { .display_clk_khz = 352000, .pixel_clk_khz = 330000 }, | ||
68 | /*ClocksStateNominal*/ | ||
69 | { .display_clk_khz = 467000, .pixel_clk_khz = 400000 }, | ||
70 | /*ClocksStatePerformance*/ | ||
71 | { .display_clk_khz = 643000, .pixel_clk_khz = 400000 } }; | ||
72 | |||
73 | static const struct state_dependent_clocks dce112_max_clks_by_state[] = { | ||
74 | /*ClocksStateInvalid - should not be used*/ | ||
75 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
76 | /*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ | ||
77 | { .display_clk_khz = 389189, .pixel_clk_khz = 346672 }, | ||
78 | /*ClocksStateLow*/ | ||
79 | { .display_clk_khz = 459000, .pixel_clk_khz = 400000 }, | ||
80 | /*ClocksStateNominal*/ | ||
81 | { .display_clk_khz = 667000, .pixel_clk_khz = 600000 }, | ||
82 | /*ClocksStatePerformance*/ | ||
83 | { .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } }; | ||
84 | |||
85 | static const struct state_dependent_clocks dce120_max_clks_by_state[] = { | ||
86 | /*ClocksStateInvalid - should not be used*/ | ||
87 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
88 | /*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ | ||
89 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
90 | /*ClocksStateLow*/ | ||
91 | { .display_clk_khz = 460000, .pixel_clk_khz = 400000 }, | ||
92 | /*ClocksStateNominal*/ | ||
93 | { .display_clk_khz = 670000, .pixel_clk_khz = 600000 }, | ||
94 | /*ClocksStatePerformance*/ | ||
95 | { .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } }; | ||
96 | |||
97 | static int dentist_get_divider_from_did(int did) | ||
98 | { | ||
99 | if (did < DENTIST_BASE_DID_1) | ||
100 | did = DENTIST_BASE_DID_1; | ||
101 | if (did > DENTIST_MAX_DID) | ||
102 | did = DENTIST_MAX_DID; | ||
103 | |||
104 | if (did < DENTIST_BASE_DID_2) { | ||
105 | return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP | ||
106 | * (did - DENTIST_BASE_DID_1); | ||
107 | } else if (did < DENTIST_BASE_DID_3) { | ||
108 | return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP | ||
109 | * (did - DENTIST_BASE_DID_2); | ||
110 | } else if (did < DENTIST_BASE_DID_4) { | ||
111 | return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP | ||
112 | * (did - DENTIST_BASE_DID_3); | ||
113 | } else { | ||
114 | return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP | ||
115 | * (did - DENTIST_BASE_DID_4); | ||
116 | } | ||
117 | } | ||
118 | |||
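A usage sketch of the DID mapping above: the returned divider is in 0.25 steps, hence the scale factor of 4 applied when converting to a clock. The 3.6 GHz VCO value is only an assumption here, matching the fallback used in dce_clock_read_integrated_info() later in this file.

    /* DID 0x41 falls in range 2: 64 + 2 * (0x41 - 0x40) = 66, i.e. 16.5.
     * With a 3600000 kHz DENTIST VCO the resulting DFS clock is
     * 4 * 3600000 / 66 = 218181 kHz, the same arithmetic used by
     * dce_get_dp_ref_freq_khz() below. */
    int div = dentist_get_divider_from_did(0x41);           /* == 66 */
    int dp_ref_clk_khz =
            (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * 3600000) / div;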
119 | /* SW will adjust the DP REF Clock average value for all purposes | ||
120 | * (DP DTO / DP Audio DTO and DP GTC) | ||
121 | * if the clock is spread, for all cases: | ||
122 | * - if SS is enabled on the DP Ref clock and HW de-spreading is enabled | ||
123 | * with SW calculations for DS_INCR/DS_MODULO (planned default case) | ||
124 | * - if SS is enabled on the DP Ref clock and HW de-spreading is enabled | ||
125 | * with HW calculations (not planned to be used, but the average clock | ||
126 | * should still be valid) | ||
127 | * - if SS is enabled on the DP Ref clock and HW de-spreading is disabled | ||
128 | * (should not be the case with CIK) then SW should program all rates | ||
129 | * generated according to the average value (as with previous ASICs) | ||
130 | */ | ||
131 | static int clk_mgr_adjust_dp_ref_freq_for_ss(struct dce_clk_mgr *clk_mgr_dce, int dp_ref_clk_khz) | ||
132 | { | ||
133 | if (clk_mgr_dce->ss_on_dprefclk && clk_mgr_dce->dprefclk_ss_divider != 0) { | ||
134 | struct fixed31_32 ss_percentage = dc_fixpt_div_int( | ||
135 | dc_fixpt_from_fraction(clk_mgr_dce->dprefclk_ss_percentage, | ||
136 | clk_mgr_dce->dprefclk_ss_divider), 200); | ||
137 | struct fixed31_32 adj_dp_ref_clk_khz; | ||
138 | |||
139 | ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage); | ||
140 | adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz); | ||
141 | dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz); | ||
142 | } | ||
143 | return dp_ref_clk_khz; | ||
144 | } | ||
145 | |||
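A plain-integer sketch of the fixed31_32 math above, assuming spread_spectrum_percentage = 3000 with divider = 1000 (i.e. 3.0% spread). The divide by 200 is a divide by 100 to convert percent to a fraction and by 2 because the average of a downspread clock sits half the spread below nominal:

    #include <stdint.h>

    /* 600000 kHz at 3.0% downspread: 600000 * (1 - 3.0 / 200) = 591000 kHz */
    static int adjust_dp_ref_for_ss(int dp_ref_clk_khz, int ss_pct, int ss_div)
    {
            int64_t scale = 200LL * ss_div;

            return (int)(((int64_t)dp_ref_clk_khz * (scale - ss_pct)) / scale);
    }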
146 | static int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr) | ||
147 | { | ||
148 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | ||
149 | int dprefclk_wdivider; | ||
150 | int dprefclk_src_sel; | ||
151 | int dp_ref_clk_khz = 600000; | ||
152 | int target_div; | ||
153 | |||
154 | /* ASSERT DP Reference Clock source is from DFS*/ | ||
155 | REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel); | ||
156 | ASSERT(dprefclk_src_sel == 0); | ||
157 | |||
158 | /* Read the mmDENTIST_DISPCLK_CNTL to get the currently | ||
159 | * programmed DID DENTIST_DPREFCLK_WDIVIDER */ | ||
160 | REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider); | ||
161 | |||
162 | /* Convert DENTIST_DPREFCLK_WDIVIDER to actual divider */ | ||
163 | target_div = dentist_get_divider_from_did(dprefclk_wdivider); | ||
164 | |||
165 | /* Calculate the current DFS clock, in kHz.*/ | ||
166 | dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR | ||
167 | * clk_mgr_dce->dentist_vco_freq_khz) / target_div; | ||
168 | |||
169 | return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, dp_ref_clk_khz); | ||
170 | } | ||
171 | |||
172 | int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr) | ||
173 | { | ||
174 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | ||
175 | |||
176 | return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, clk_mgr_dce->dprefclk_khz); | ||
177 | } | ||
178 | |||
179 | /* unit: in_khz before mode set, get pixel clock from context. ASIC register | ||
180 | * may not be programmed yet | ||
181 | */ | ||
182 | static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context) | ||
183 | { | ||
184 | uint32_t max_pix_clk = 0; | ||
185 | int i; | ||
186 | |||
187 | for (i = 0; i < MAX_PIPES; i++) { | ||
188 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | ||
189 | |||
190 | if (pipe_ctx->stream == NULL) | ||
191 | continue; | ||
192 | |||
193 | /* do not check underlay */ | ||
194 | if (pipe_ctx->top_pipe) | ||
195 | continue; | ||
196 | |||
197 | if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk) | ||
198 | max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk; | ||
199 | |||
200 | /* Raise the clock state for HBR2/3 if required. Confirmed with HW DCE/DPCS: | ||
201 | * HBR3 logic still needs Nominal (0.8V) on the VDDC rail | ||
202 | */ | ||
203 | if (dc_is_dp_signal(pipe_ctx->stream->signal) && | ||
204 | pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk) | ||
205 | max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk; | ||
206 | } | ||
207 | |||
208 | return max_pix_clk; | ||
209 | } | ||
210 | |||
211 | static enum dm_pp_clocks_state dce_get_required_clocks_state( | ||
212 | struct clk_mgr *clk_mgr, | ||
213 | struct dc_state *context) | ||
214 | { | ||
215 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | ||
216 | int i; | ||
217 | enum dm_pp_clocks_state low_req_clk; | ||
218 | int max_pix_clk = get_max_pixel_clock_for_all_paths(context); | ||
219 | |||
220 | /* Iterate from highest supported to lowest valid state, and update | ||
221 | * lowest RequiredState with the lowest state that satisfies | ||
222 | * all required clocks | ||
223 | */ | ||
224 | for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--) | ||
225 | if (context->bw.dce.dispclk_khz > | ||
226 | clk_mgr_dce->max_clks_by_state[i].display_clk_khz | ||
227 | || max_pix_clk > | ||
228 | clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz) | ||
229 | break; | ||
230 | |||
231 | low_req_clk = i + 1; | ||
232 | if (low_req_clk > clk_mgr_dce->max_clks_state) { | ||
233 | /* use the max clock state for a high phyclk; mark invalid if even that cannot cover the display clock */ | ||
234 | if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz | ||
235 | < context->bw.dce.dispclk_khz) | ||
236 | low_req_clk = DM_PP_CLOCKS_STATE_INVALID; | ||
237 | else | ||
238 | low_req_clk = clk_mgr_dce->max_clks_state; | ||
239 | } | ||
240 | |||
241 | return low_req_clk; | ||
242 | } | ||
243 | |||
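A worked trace of the selection loop above against the DCE 11.0 table, ignoring the out-of-range fixup that follows it: with dispclk = 400000 kHz and a max pixel clock of 350000 kHz, NOMINAL (467000/400000) satisfies both clocks but LOW (352000/330000) cannot cover either, so the loop breaks at LOW and low_req_clk lands on NOMINAL. A simplified model:

    /* tbl is one of the state_dependent_clocks tables above. */
    static int pick_lowest_ok_state(const struct state_dependent_clocks *tbl,
                                    int max_state, int disp_khz, int pix_khz)
    {
            int i;

            for (i = max_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
                    if (disp_khz > tbl[i].display_clk_khz ||
                        pix_khz > tbl[i].pixel_clk_khz)
                            break;

            return i + 1;   /* lowest state satisfying both clocks */
    }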
244 | static int dce_set_clock( | ||
245 | struct clk_mgr *clk_mgr, | ||
246 | int requested_clk_khz) | ||
247 | { | ||
248 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | ||
249 | struct bp_pixel_clock_parameters pxl_clk_params = { 0 }; | ||
250 | struct dc_bios *bp = clk_mgr->ctx->dc_bios; | ||
251 | int actual_clock = requested_clk_khz; | ||
252 | struct dmcu *dmcu = clk_mgr_dce->base.ctx->dc->res_pool->dmcu; | ||
253 | |||
254 | /* Make sure requested clock isn't lower than minimum threshold*/ | ||
255 | if (requested_clk_khz > 0) | ||
256 | requested_clk_khz = max(requested_clk_khz, | ||
257 | clk_mgr_dce->dentist_vco_freq_khz / 64); | ||
258 | |||
259 | /* Prepare to program display clock*/ | ||
260 | pxl_clk_params.target_pixel_clock = requested_clk_khz; | ||
261 | pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; | ||
262 | |||
263 | if (clk_mgr_dce->dfs_bypass_active) | ||
264 | pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true; | ||
265 | |||
266 | bp->funcs->program_display_engine_pll(bp, &pxl_clk_params); | ||
267 | |||
268 | if (clk_mgr_dce->dfs_bypass_active) { | ||
269 | /* Cache the fixed display clock*/ | ||
270 | clk_mgr_dce->dfs_bypass_disp_clk = | ||
271 | pxl_clk_params.dfs_bypass_display_clock; | ||
272 | actual_clock = pxl_clk_params.dfs_bypass_display_clock; | ||
273 | } | ||
274 | |||
275 | /* From power down we need to mark the clock state as ClocksStateNominal | ||
276 | * from HWReset, so that on resume we will call the pplib voltage regulator. */ | ||
277 | if (requested_clk_khz == 0) | ||
278 | clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; | ||
279 | |||
280 | dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7); | ||
281 | |||
282 | return actual_clock; | ||
283 | } | ||
284 | |||
285 | int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz) | ||
286 | { | ||
287 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | ||
288 | struct bp_set_dce_clock_parameters dce_clk_params; | ||
289 | struct dc_bios *bp = clk_mgr->ctx->dc_bios; | ||
290 | struct dc *core_dc = clk_mgr->ctx->dc; | ||
291 | struct dmcu *dmcu = core_dc->res_pool->dmcu; | ||
292 | int actual_clock = requested_clk_khz; | ||
293 | /* Prepare to program display clock*/ | ||
294 | memset(&dce_clk_params, 0, sizeof(dce_clk_params)); | ||
295 | |||
296 | /* Make sure requested clock isn't lower than minimum threshold*/ | ||
297 | if (requested_clk_khz > 0) | ||
298 | requested_clk_khz = max(requested_clk_khz, | ||
299 | clk_mgr_dce->dentist_vco_freq_khz / 62); | ||
300 | |||
301 | dce_clk_params.target_clock_frequency = requested_clk_khz; | ||
302 | dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; | ||
303 | dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK; | ||
304 | |||
305 | bp->funcs->set_dce_clock(bp, &dce_clk_params); | ||
306 | actual_clock = dce_clk_params.target_clock_frequency; | ||
307 | |||
308 | /* From power down we need to mark the clock state as ClocksStateNominal | ||
309 | * from HWReset, so that on resume we will call the pplib voltage regulator. */ | ||
310 | if (requested_clk_khz == 0) | ||
311 | clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; | ||
312 | |||
313 | /*Program DP ref Clock*/ | ||
314 | /*VBIOS will determine DPREFCLK frequency, so we don't set it*/ | ||
315 | dce_clk_params.target_clock_frequency = 0; | ||
316 | dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK; | ||
317 | if (!ASICREV_IS_VEGA20_P(clk_mgr->ctx->asic_id.hw_internal_rev)) | ||
318 | dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = | ||
319 | (dce_clk_params.pll_id == | ||
320 | CLOCK_SOURCE_COMBO_DISPLAY_PLL0); | ||
321 | else | ||
322 | dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false; | ||
323 | |||
324 | bp->funcs->set_dce_clock(bp, &dce_clk_params); | ||
325 | |||
326 | if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { | ||
327 | if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock) | ||
328 | dmcu->funcs->set_psr_wait_loop(dmcu, | ||
329 | actual_clock / 1000 / 7); | ||
330 | } | ||
331 | |||
332 | clk_mgr_dce->dfs_bypass_disp_clk = actual_clock; | ||
333 | return actual_clock; | ||
334 | } | ||
335 | |||
336 | static void dce_clock_read_integrated_info(struct dce_clk_mgr *clk_mgr_dce) | ||
337 | { | ||
338 | struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug; | ||
339 | struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios; | ||
340 | struct integrated_info info = { { { 0 } } }; | ||
341 | struct dc_firmware_info fw_info = { { 0 } }; | ||
342 | int i; | ||
343 | |||
344 | if (bp->integrated_info) | ||
345 | info = *bp->integrated_info; | ||
346 | |||
347 | clk_mgr_dce->dentist_vco_freq_khz = info.dentist_vco_freq; | ||
348 | if (clk_mgr_dce->dentist_vco_freq_khz == 0) { | ||
349 | bp->funcs->get_firmware_info(bp, &fw_info); | ||
350 | clk_mgr_dce->dentist_vco_freq_khz = | ||
351 | fw_info.smu_gpu_pll_output_freq; | ||
352 | if (clk_mgr_dce->dentist_vco_freq_khz == 0) | ||
353 | clk_mgr_dce->dentist_vco_freq_khz = 3600000; | ||
354 | } | ||
355 | |||
356 | /*update the maximum display clock for each power state*/ | ||
357 | for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) { | ||
358 | enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID; | ||
359 | |||
360 | switch (i) { | ||
361 | case 0: | ||
362 | clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW; | ||
363 | break; | ||
364 | |||
365 | case 1: | ||
366 | clk_state = DM_PP_CLOCKS_STATE_LOW; | ||
367 | break; | ||
368 | |||
369 | case 2: | ||
370 | clk_state = DM_PP_CLOCKS_STATE_NOMINAL; | ||
371 | break; | ||
372 | |||
373 | case 3: | ||
374 | clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE; | ||
375 | break; | ||
376 | |||
377 | default: | ||
378 | clk_state = DM_PP_CLOCKS_STATE_INVALID; | ||
379 | break; | ||
380 | } | ||
381 | |||
382 | /*Do not allow bad VBIOS/SBIOS to override with invalid values, | ||
383 | * check for > 100MHz*/ | ||
384 | if (info.disp_clk_voltage[i].max_supported_clk >= 100000) | ||
385 | clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz = | ||
386 | info.disp_clk_voltage[i].max_supported_clk; | ||
387 | } | ||
388 | |||
389 | if (!debug->disable_dfs_bypass && bp->integrated_info) | ||
390 | if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE) | ||
391 | clk_mgr_dce->dfs_bypass_enabled = true; | ||
392 | } | ||
393 | |||
394 | void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce) | ||
395 | { | ||
396 | struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios; | ||
397 | int ss_info_num = bp->funcs->get_ss_entry_number( | ||
398 | bp, AS_SIGNAL_TYPE_GPU_PLL); | ||
399 | |||
400 | if (ss_info_num) { | ||
401 | struct spread_spectrum_info info = { { 0 } }; | ||
402 | enum bp_result result = bp->funcs->get_spread_spectrum_info( | ||
403 | bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info); | ||
404 | |||
405 | /* VBIOS keeps an entry for GPU PLL SS even when SS is not | ||
406 | * enabled; in that case the percentage reads back as 0, so | ||
407 | * SSInfo.spreadSpectrumPercentage != 0 is the sign | ||
408 | * that SS is enabled | ||
409 | */ | ||
410 | if (result == BP_RESULT_OK && | ||
411 | info.spread_spectrum_percentage != 0) { | ||
412 | clk_mgr_dce->ss_on_dprefclk = true; | ||
413 | clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider; | ||
414 | |||
415 | if (info.type.CENTER_MODE == 0) { | ||
416 | /* TODO: Currently for DP Reference clock we | ||
417 | * need only SS percentage for | ||
418 | * downspread */ | ||
419 | clk_mgr_dce->dprefclk_ss_percentage = | ||
420 | info.spread_spectrum_percentage; | ||
421 | } | ||
422 | |||
423 | return; | ||
424 | } | ||
425 | |||
426 | result = bp->funcs->get_spread_spectrum_info( | ||
427 | bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info); | ||
428 | |||
429 | /* VBIOS keeps an entry for DPREFCLK SS even when SS is not | ||
430 | * enabled; in that case the percentage reads back as 0, so | ||
431 | * SSInfo.spreadSpectrumPercentage != 0 is the sign | ||
432 | * that SS is enabled | ||
433 | */ | ||
434 | if (result == BP_RESULT_OK && | ||
435 | info.spread_spectrum_percentage != 0) { | ||
436 | clk_mgr_dce->ss_on_dprefclk = true; | ||
437 | clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider; | ||
438 | |||
439 | if (info.type.CENTER_MODE == 0) { | ||
440 | /* Currently for DP Reference clock we | ||
441 | * need only SS percentage for | ||
442 | * downspread */ | ||
443 | clk_mgr_dce->dprefclk_ss_percentage = | ||
444 | info.spread_spectrum_percentage; | ||
445 | } | ||
446 | } | ||
447 | } | ||
448 | } | ||
449 | |||
450 | void dce110_fill_display_configs( | ||
451 | const struct dc_state *context, | ||
452 | struct dm_pp_display_configuration *pp_display_cfg) | ||
453 | { | ||
454 | int j; | ||
455 | int num_cfgs = 0; | ||
456 | |||
457 | for (j = 0; j < context->stream_count; j++) { | ||
458 | int k; | ||
459 | |||
460 | const struct dc_stream_state *stream = context->streams[j]; | ||
461 | struct dm_pp_single_disp_config *cfg = | ||
462 | &pp_display_cfg->disp_configs[num_cfgs]; | ||
463 | const struct pipe_ctx *pipe_ctx = NULL; | ||
464 | |||
465 | for (k = 0; k < MAX_PIPES; k++) | ||
466 | if (stream == context->res_ctx.pipe_ctx[k].stream) { | ||
467 | pipe_ctx = &context->res_ctx.pipe_ctx[k]; | ||
468 | break; | ||
469 | } | ||
470 | |||
471 | ASSERT(pipe_ctx != NULL); | ||
472 | |||
473 | /* only notify active stream */ | ||
474 | if (stream->dpms_off) | ||
475 | continue; | ||
476 | |||
477 | num_cfgs++; | ||
478 | cfg->signal = pipe_ctx->stream->signal; | ||
479 | cfg->pipe_idx = pipe_ctx->stream_res.tg->inst; | ||
480 | cfg->src_height = stream->src.height; | ||
481 | cfg->src_width = stream->src.width; | ||
482 | cfg->ddi_channel_mapping = | ||
483 | stream->sink->link->ddi_channel_mapping.raw; | ||
484 | cfg->transmitter = | ||
485 | stream->sink->link->link_enc->transmitter; | ||
486 | cfg->link_settings.lane_count = | ||
487 | stream->sink->link->cur_link_settings.lane_count; | ||
488 | cfg->link_settings.link_rate = | ||
489 | stream->sink->link->cur_link_settings.link_rate; | ||
490 | cfg->link_settings.link_spread = | ||
491 | stream->sink->link->cur_link_settings.link_spread; | ||
492 | cfg->sym_clock = stream->phy_pix_clk; | ||
493 | /* Round v_refresh*/ | ||
494 | cfg->v_refresh = stream->timing.pix_clk_khz * 1000; | ||
495 | cfg->v_refresh /= stream->timing.h_total; | ||
496 | cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2) | ||
497 | / stream->timing.v_total; | ||
498 | } | ||
499 | |||
500 | pp_display_cfg->display_count = num_cfgs; | ||
501 | } | ||
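The v_refresh computation above is integer rounding to the nearest Hz; a worked example with an assumed CEA 1080p60 timing (148500 kHz pixel clock, h_total 2200, v_total 1125):

    uint32_t v_refresh = 148500u * 1000u / 2200u;  /* 67500 lines per second */

    v_refresh = (v_refresh + 1125u / 2u) / 1125u;  /* (67500 + 562) / 1125 = 60 */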
502 | |||
503 | static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context) | ||
504 | { | ||
505 | uint8_t j; | ||
506 | uint32_t min_vertical_blank_time = -1; | ||
507 | |||
508 | for (j = 0; j < context->stream_count; j++) { | ||
509 | struct dc_stream_state *stream = context->streams[j]; | ||
510 | uint32_t vertical_blank_in_pixels = 0; | ||
511 | uint32_t vertical_blank_time = 0; | ||
512 | |||
513 | vertical_blank_in_pixels = stream->timing.h_total * | ||
514 | (stream->timing.v_total | ||
515 | - stream->timing.v_addressable); | ||
516 | |||
517 | vertical_blank_time = vertical_blank_in_pixels | ||
518 | * 1000 / stream->timing.pix_clk_khz; | ||
519 | |||
520 | if (min_vertical_blank_time > vertical_blank_time) | ||
521 | min_vertical_blank_time = vertical_blank_time; | ||
522 | } | ||
523 | |||
524 | return min_vertical_blank_time; | ||
525 | } | ||
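For the same assumed 1080p60 timing, the minimum vblank time works out as follows; this is the window reported to PPLib as available mclk-switch time:

    uint32_t vblank_pixels = 2200u * (1125u - 1080u);       /* 99000 */
    uint32_t vblank_us = vblank_pixels * 1000u / 148500u;   /* 666 us */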
526 | |||
527 | static int determine_sclk_from_bounding_box( | ||
528 | const struct dc *dc, | ||
529 | int required_sclk) | ||
530 | { | ||
531 | int i; | ||
532 | |||
533 | /* | ||
534 | * Some asics do not give us sclk levels, so we just report the actual | ||
535 | * required sclk | ||
536 | */ | ||
537 | if (dc->sclk_lvls.num_levels == 0) | ||
538 | return required_sclk; | ||
539 | |||
540 | for (i = 0; i < dc->sclk_lvls.num_levels; i++) { | ||
541 | if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk) | ||
542 | return dc->sclk_lvls.clocks_in_khz[i]; | ||
543 | } | ||
544 | /* | ||
545 | * even maximum level could not satisfy requirement, this | ||
546 | * is unexpected at this stage, should have been caught at | ||
547 | * validation time | ||
548 | */ | ||
549 | ASSERT(0); | ||
550 | return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1]; | ||
551 | } | ||
552 | |||
553 | static void dce_pplib_apply_display_requirements( | ||
554 | struct dc *dc, | ||
555 | struct dc_state *context) | ||
556 | { | ||
557 | struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; | ||
558 | |||
559 | pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); | ||
560 | |||
561 | dce110_fill_display_configs(context, pp_display_cfg); | ||
562 | |||
563 | if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) | ||
564 | dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); | ||
565 | } | ||
566 | |||
567 | static void dce11_pplib_apply_display_requirements( | ||
568 | struct dc *dc, | ||
569 | struct dc_state *context) | ||
570 | { | ||
571 | struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; | ||
572 | |||
573 | pp_display_cfg->all_displays_in_sync = | ||
574 | context->bw.dce.all_displays_in_sync; | ||
575 | pp_display_cfg->nb_pstate_switch_disable = | ||
576 | context->bw.dce.nbp_state_change_enable == false; | ||
577 | pp_display_cfg->cpu_cc6_disable = | ||
578 | context->bw.dce.cpuc_state_change_enable == false; | ||
579 | pp_display_cfg->cpu_pstate_disable = | ||
580 | context->bw.dce.cpup_state_change_enable == false; | ||
581 | pp_display_cfg->cpu_pstate_separation_time = | ||
582 | context->bw.dce.blackout_recovery_time_us; | ||
583 | |||
584 | pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz | ||
585 | / MEMORY_TYPE_MULTIPLIER_CZ; | ||
586 | |||
587 | pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box( | ||
588 | dc, | ||
589 | context->bw.dce.sclk_khz); | ||
590 | |||
591 | pp_display_cfg->min_engine_clock_deep_sleep_khz | ||
592 | = context->bw.dce.sclk_deep_sleep_khz; | ||
593 | |||
594 | pp_display_cfg->avail_mclk_switch_time_us = | ||
595 | dce110_get_min_vblank_time_us(context); | ||
596 | /* TODO: dce11.2*/ | ||
597 | pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; | ||
598 | |||
599 | pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz; | ||
600 | |||
601 | dce110_fill_display_configs(context, pp_display_cfg); | ||
602 | |||
603 | /* TODO: is this still applicable?*/ | ||
604 | if (pp_display_cfg->display_count == 1) { | ||
605 | const struct dc_crtc_timing *timing = | ||
606 | &context->streams[0]->timing; | ||
607 | |||
608 | pp_display_cfg->crtc_index = | ||
609 | pp_display_cfg->disp_configs[0].pipe_idx; | ||
610 | pp_display_cfg->line_time_in_us = timing->h_total * 1000 / timing->pix_clk_khz; | ||
611 | } | ||
612 | |||
613 | if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) | ||
614 | dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); | ||
615 | } | ||
616 | |||
617 | static void dce_update_clocks(struct clk_mgr *clk_mgr, | ||
618 | struct dc_state *context, | ||
619 | bool safe_to_lower) | ||
620 | { | ||
621 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | ||
622 | struct dm_pp_power_level_change_request level_change_req; | ||
623 | int unpatched_disp_clk = context->bw.dce.dispclk_khz; | ||
624 | |||
625 | /*TODO: W/A for dal3 linux, investigate why this works */ | ||
626 | if (!clk_mgr_dce->dfs_bypass_active) | ||
627 | context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100; | ||
628 | |||
629 | level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); | ||
630 | /* get max clock state from PPLIB */ | ||
631 | if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower) | ||
632 | || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) { | ||
633 | if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req)) | ||
634 | clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; | ||
635 | } | ||
636 | |||
637 | if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { | ||
638 | context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz); | ||
639 | clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; | ||
640 | } | ||
641 | dce_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); | ||
642 | |||
643 | context->bw.dce.dispclk_khz = unpatched_disp_clk; | ||
644 | } | ||
645 | |||
646 | static void dce11_update_clocks(struct clk_mgr *clk_mgr, | ||
647 | struct dc_state *context, | ||
648 | bool safe_to_lower) | ||
649 | { | ||
650 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | ||
651 | struct dm_pp_power_level_change_request level_change_req; | ||
652 | |||
653 | level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); | ||
654 | /* get max clock state from PPLIB */ | ||
655 | if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower) | ||
656 | || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) { | ||
657 | if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req)) | ||
658 | clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; | ||
659 | } | ||
660 | |||
661 | if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { | ||
662 | context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz); | ||
663 | clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; | ||
664 | } | ||
665 | dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); | ||
666 | } | ||
667 | |||
668 | static void dce112_update_clocks(struct clk_mgr *clk_mgr, | ||
669 | struct dc_state *context, | ||
670 | bool safe_to_lower) | ||
671 | { | ||
672 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | ||
673 | struct dm_pp_power_level_change_request level_change_req; | ||
674 | |||
675 | level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); | ||
676 | /* get max clock state from PPLIB */ | ||
677 | if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower) | ||
678 | || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) { | ||
679 | if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req)) | ||
680 | clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; | ||
681 | } | ||
682 | |||
683 | if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { | ||
684 | context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz); | ||
685 | clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; | ||
686 | } | ||
687 | dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); | ||
688 | } | ||
689 | |||
690 | static void dce12_update_clocks(struct clk_mgr *clk_mgr, | ||
691 | struct dc_state *context, | ||
692 | bool safe_to_lower) | ||
693 | { | ||
694 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | ||
695 | struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; | ||
696 | int max_pix_clk = get_max_pixel_clock_for_all_paths(context); | ||
697 | int unpatched_disp_clk = context->bw.dce.dispclk_khz; | ||
698 | |||
699 | /*TODO: W/A for dal3 linux, investigate why this works */ | ||
700 | if (!clk_mgr_dce->dfs_bypass_active) | ||
701 | context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100; | ||
702 | |||
703 | if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { | ||
704 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK; | ||
705 | clock_voltage_req.clocks_in_khz = context->bw.dce.dispclk_khz; | ||
706 | context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz); | ||
707 | clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; | ||
708 | |||
709 | dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req); | ||
710 | } | ||
711 | |||
712 | if (should_set_clock(safe_to_lower, max_pix_clk, clk_mgr->clks.phyclk_khz)) { | ||
713 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK; | ||
714 | clock_voltage_req.clocks_in_khz = max_pix_clk; | ||
715 | clk_mgr->clks.phyclk_khz = max_pix_clk; | ||
716 | |||
717 | dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req); | ||
718 | } | ||
719 | dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); | ||
720 | |||
721 | context->bw.dce.dispclk_khz = unpatched_disp_clk; | ||
722 | } | ||
723 | |||
724 | static const struct clk_mgr_funcs dce120_funcs = { | ||
725 | .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, | ||
726 | .update_clocks = dce12_update_clocks | ||
727 | }; | ||
728 | |||
729 | static const struct clk_mgr_funcs dce112_funcs = { | ||
730 | .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, | ||
731 | .update_clocks = dce112_update_clocks | ||
732 | }; | ||
733 | |||
734 | static const struct clk_mgr_funcs dce110_funcs = { | ||
735 | .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, | ||
736 | .update_clocks = dce11_update_clocks, | ||
737 | }; | ||
738 | |||
739 | static const struct clk_mgr_funcs dce_funcs = { | ||
740 | .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, | ||
741 | .update_clocks = dce_update_clocks | ||
742 | }; | ||
743 | |||
744 | static void dce_clk_mgr_construct( | ||
745 | struct dce_clk_mgr *clk_mgr_dce, | ||
746 | struct dc_context *ctx, | ||
747 | const struct clk_mgr_registers *regs, | ||
748 | const struct clk_mgr_shift *clk_shift, | ||
749 | const struct clk_mgr_mask *clk_mask) | ||
750 | { | ||
751 | struct clk_mgr *base = &clk_mgr_dce->base; | ||
752 | struct dm_pp_static_clock_info static_clk_info = {0}; | ||
753 | |||
754 | base->ctx = ctx; | ||
755 | base->funcs = &dce_funcs; | ||
756 | |||
757 | clk_mgr_dce->regs = regs; | ||
758 | clk_mgr_dce->clk_mgr_shift = clk_shift; | ||
759 | clk_mgr_dce->clk_mgr_mask = clk_mask; | ||
760 | |||
761 | clk_mgr_dce->dfs_bypass_disp_clk = 0; | ||
762 | |||
763 | clk_mgr_dce->dprefclk_ss_percentage = 0; | ||
764 | clk_mgr_dce->dprefclk_ss_divider = 1000; | ||
765 | clk_mgr_dce->ss_on_dprefclk = false; | ||
766 | |||
767 | |||
768 | if (dm_pp_get_static_clocks(ctx, &static_clk_info)) | ||
769 | clk_mgr_dce->max_clks_state = static_clk_info.max_clocks_state; | ||
770 | else | ||
771 | clk_mgr_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; | ||
772 | clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID; | ||
773 | |||
774 | dce_clock_read_integrated_info(clk_mgr_dce); | ||
775 | dce_clock_read_ss_info(clk_mgr_dce); | ||
776 | } | ||
777 | |||
778 | struct clk_mgr *dce_clk_mgr_create( | ||
779 | struct dc_context *ctx, | ||
780 | const struct clk_mgr_registers *regs, | ||
781 | const struct clk_mgr_shift *clk_shift, | ||
782 | const struct clk_mgr_mask *clk_mask) | ||
783 | { | ||
784 | struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); | ||
785 | |||
786 | if (clk_mgr_dce == NULL) { | ||
787 | BREAK_TO_DEBUGGER(); | ||
788 | return NULL; | ||
789 | } | ||
790 | |||
791 | memcpy(clk_mgr_dce->max_clks_by_state, | ||
792 | dce80_max_clks_by_state, | ||
793 | sizeof(dce80_max_clks_by_state)); | ||
794 | |||
795 | dce_clk_mgr_construct( | ||
796 | clk_mgr_dce, ctx, regs, clk_shift, clk_mask); | ||
797 | |||
798 | return &clk_mgr_dce->base; | ||
799 | } | ||
800 | |||
801 | struct clk_mgr *dce110_clk_mgr_create( | ||
802 | struct dc_context *ctx, | ||
803 | const struct clk_mgr_registers *regs, | ||
804 | const struct clk_mgr_shift *clk_shift, | ||
805 | const struct clk_mgr_mask *clk_mask) | ||
806 | { | ||
807 | struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); | ||
808 | |||
809 | if (clk_mgr_dce == NULL) { | ||
810 | BREAK_TO_DEBUGGER(); | ||
811 | return NULL; | ||
812 | } | ||
813 | |||
814 | memcpy(clk_mgr_dce->max_clks_by_state, | ||
815 | dce110_max_clks_by_state, | ||
816 | sizeof(dce110_max_clks_by_state)); | ||
817 | |||
818 | dce_clk_mgr_construct( | ||
819 | clk_mgr_dce, ctx, regs, clk_shift, clk_mask); | ||
820 | |||
821 | clk_mgr_dce->base.funcs = &dce110_funcs; | ||
822 | |||
823 | return &clk_mgr_dce->base; | ||
824 | } | ||
825 | |||
826 | struct clk_mgr *dce112_clk_mgr_create( | ||
827 | struct dc_context *ctx, | ||
828 | const struct clk_mgr_registers *regs, | ||
829 | const struct clk_mgr_shift *clk_shift, | ||
830 | const struct clk_mgr_mask *clk_mask) | ||
831 | { | ||
832 | struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); | ||
833 | |||
834 | if (clk_mgr_dce == NULL) { | ||
835 | BREAK_TO_DEBUGGER(); | ||
836 | return NULL; | ||
837 | } | ||
838 | |||
839 | memcpy(clk_mgr_dce->max_clks_by_state, | ||
840 | dce112_max_clks_by_state, | ||
841 | sizeof(dce112_max_clks_by_state)); | ||
842 | |||
843 | dce_clk_mgr_construct( | ||
844 | clk_mgr_dce, ctx, regs, clk_shift, clk_mask); | ||
845 | |||
846 | clk_mgr_dce->base.funcs = &dce112_funcs; | ||
847 | |||
848 | return &clk_mgr_dce->base; | ||
849 | } | ||
850 | |||
851 | struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx) | ||
852 | { | ||
853 | struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); | ||
854 | |||
855 | if (clk_mgr_dce == NULL) { | ||
856 | BREAK_TO_DEBUGGER(); | ||
857 | return NULL; | ||
858 | } | ||
859 | |||
860 | memcpy(clk_mgr_dce->max_clks_by_state, | ||
861 | dce120_max_clks_by_state, | ||
862 | sizeof(dce120_max_clks_by_state)); | ||
863 | |||
864 | dce_clk_mgr_construct( | ||
865 | clk_mgr_dce, ctx, NULL, NULL, NULL); | ||
866 | |||
867 | clk_mgr_dce->dprefclk_khz = 600000; | ||
868 | clk_mgr_dce->base.funcs = &dce120_funcs; | ||
869 | |||
870 | return &clk_mgr_dce->base; | ||
871 | } | ||
872 | |||
873 | void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr) | ||
874 | { | ||
875 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(*clk_mgr); | ||
876 | |||
877 | kfree(clk_mgr_dce); | ||
878 | *clk_mgr = NULL; | ||
879 | } | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h index 34fdb386c884..046077797416 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h | |||
@@ -24,10 +24,13 @@ | |||
24 | */ | 24 | */ |
25 | 25 | ||
26 | 26 | ||
27 | #ifndef _DCE_CLOCKS_H_ | 27 | #ifndef _DCE_CLK_MGR_H_ |
28 | #define _DCE_CLOCKS_H_ | 28 | #define _DCE_CLK_MGR_H_ |
29 | 29 | ||
30 | #include "display_clock.h" | 30 | #include "clk_mgr.h" |
31 | #include "dccg.h" | ||
32 | |||
33 | #define MEMORY_TYPE_MULTIPLIER_CZ 4 | ||
31 | 34 | ||
32 | #define CLK_COMMON_REG_LIST_DCE_BASE() \ | 35 | #define CLK_COMMON_REG_LIST_DCE_BASE() \ |
33 | .DPREFCLK_CNTL = mmDPREFCLK_CNTL, \ | 36 | .DPREFCLK_CNTL = mmDPREFCLK_CNTL, \ |
@@ -53,24 +56,31 @@ | |||
53 | type DENTIST_DISPCLK_WDIVIDER; \ | 56 | type DENTIST_DISPCLK_WDIVIDER; \ |
54 | type DENTIST_DISPCLK_CHG_DONE; | 57 | type DENTIST_DISPCLK_CHG_DONE; |
55 | 58 | ||
56 | struct dccg_shift { | 59 | struct clk_mgr_shift { |
57 | CLK_REG_FIELD_LIST(uint8_t) | 60 | CLK_REG_FIELD_LIST(uint8_t) |
58 | }; | 61 | }; |
59 | 62 | ||
60 | struct dccg_mask { | 63 | struct clk_mgr_mask { |
61 | CLK_REG_FIELD_LIST(uint32_t) | 64 | CLK_REG_FIELD_LIST(uint32_t) |
62 | }; | 65 | }; |
63 | 66 | ||
64 | struct dccg_registers { | 67 | struct clk_mgr_registers { |
65 | uint32_t DPREFCLK_CNTL; | 68 | uint32_t DPREFCLK_CNTL; |
66 | uint32_t DENTIST_DISPCLK_CNTL; | 69 | uint32_t DENTIST_DISPCLK_CNTL; |
67 | }; | 70 | }; |
68 | 71 | ||
69 | struct dce_dccg { | 72 | struct state_dependent_clocks { |
70 | struct dccg base; | 73 | int display_clk_khz; |
71 | const struct dccg_registers *regs; | 74 | int pixel_clk_khz; |
72 | const struct dccg_shift *clk_shift; | 75 | }; |
73 | const struct dccg_mask *clk_mask; | 76 | |
77 | struct dce_clk_mgr { | ||
78 | struct clk_mgr base; | ||
79 | const struct clk_mgr_registers *regs; | ||
80 | const struct clk_mgr_shift *clk_mgr_shift; | ||
81 | const struct clk_mgr_mask *clk_mgr_mask; | ||
82 | |||
83 | struct dccg *dccg; | ||
74 | 84 | ||
75 | struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES]; | 85 | struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES]; |
76 | 86 | ||
@@ -91,33 +101,68 @@ struct dce_dccg { | |||
91 | /* DPREFCLK SS percentage Divider (100 or 1000) */ | 101 | /* DPREFCLK SS percentage Divider (100 or 1000) */ |
92 | int dprefclk_ss_divider; | 102 | int dprefclk_ss_divider; |
93 | int dprefclk_khz; | 103 | int dprefclk_khz; |
104 | |||
105 | enum dm_pp_clocks_state max_clks_state; | ||
106 | enum dm_pp_clocks_state cur_min_clks_state; | ||
94 | }; | 107 | }; |
95 | 108 | ||
109 | /* Starting DID for each range */ | ||
110 | enum dentist_base_divider_id { | ||
111 | DENTIST_BASE_DID_1 = 0x08, | ||
112 | DENTIST_BASE_DID_2 = 0x40, | ||
113 | DENTIST_BASE_DID_3 = 0x60, | ||
114 | DENTIST_BASE_DID_4 = 0x7e, | ||
115 | DENTIST_MAX_DID = 0x7f | ||
116 | }; | ||
96 | 117 | ||
97 | struct dccg *dce_dccg_create( | 118 | /* Starting point and step size for each divider range.*/ |
98 | struct dc_context *ctx, | 119 | enum dentist_divider_range { |
99 | const struct dccg_registers *regs, | 120 | DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */ |
100 | const struct dccg_shift *clk_shift, | 121 | DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */ |
101 | const struct dccg_mask *clk_mask); | 122 | DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */ |
123 | DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */ | ||
124 | DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */ | ||
125 | DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */ | ||
126 | DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */ | ||
127 | DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */ | ||
128 | DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4 | ||
129 | }; | ||
130 | |||
131 | static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk) | ||
132 | { | ||
133 | return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk); | ||
134 | } | ||
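This helper encodes the raise-immediately/lower-when-safe policy used by the dce*_update_clocks() paths in dce_clk_mgr.c; for illustration (e.g. inside a unit-test body):

    #include <assert.h>

    assert(should_set_clock(false, 500000, 400000));  /* raising: always OK */
    assert(!should_set_clock(false, 300000, 400000)); /* lowering deferred */
    assert(should_set_clock(true, 300000, 400000));   /* lowering when safe */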
135 | |||
136 | void dce_clock_read_ss_info(struct dce_clk_mgr *dccg_dce); | ||
137 | |||
138 | int dce12_get_dp_ref_freq_khz(struct clk_mgr *dccg); | ||
102 | 139 | ||
103 | struct dccg *dce110_dccg_create( | 140 | void dce110_fill_display_configs( |
141 | const struct dc_state *context, | ||
142 | struct dm_pp_display_configuration *pp_display_cfg); | ||
143 | |||
144 | int dce112_set_clock(struct clk_mgr *dccg, int requested_clk_khz); | ||
145 | |||
146 | struct clk_mgr *dce_clk_mgr_create( | ||
104 | struct dc_context *ctx, | 147 | struct dc_context *ctx, |
105 | const struct dccg_registers *regs, | 148 | const struct clk_mgr_registers *regs, |
106 | const struct dccg_shift *clk_shift, | 149 | const struct clk_mgr_shift *clk_shift, |
107 | const struct dccg_mask *clk_mask); | 150 | const struct clk_mgr_mask *clk_mask); |
108 | 151 | ||
109 | struct dccg *dce112_dccg_create( | 152 | struct clk_mgr *dce110_clk_mgr_create( |
110 | struct dc_context *ctx, | 153 | struct dc_context *ctx, |
111 | const struct dccg_registers *regs, | 154 | const struct clk_mgr_registers *regs, |
112 | const struct dccg_shift *clk_shift, | 155 | const struct clk_mgr_shift *clk_shift, |
113 | const struct dccg_mask *clk_mask); | 156 | const struct clk_mgr_mask *clk_mask); |
114 | 157 | ||
115 | struct dccg *dce120_dccg_create(struct dc_context *ctx); | 158 | struct clk_mgr *dce112_clk_mgr_create( |
159 | struct dc_context *ctx, | ||
160 | const struct clk_mgr_registers *regs, | ||
161 | const struct clk_mgr_shift *clk_shift, | ||
162 | const struct clk_mgr_mask *clk_mask); | ||
116 | 163 | ||
117 | #ifdef CONFIG_DRM_AMD_DC_DCN1_0 | 164 | struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx); |
118 | struct dccg *dcn1_dccg_create(struct dc_context *ctx); | ||
119 | #endif | ||
120 | 165 | ||
121 | void dce_dccg_destroy(struct dccg **dccg); | 166 | void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr); |
122 | 167 | ||
123 | #endif /* _DCE_CLOCKS_H_ */ | 168 | #endif /* _DCE_CLK_MGR_H_ */ |
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c deleted file mode 100644 index d89a097ba936..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c +++ /dev/null | |||
@@ -1,947 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright 2012-16 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: AMD | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include "dce_clocks.h" | ||
27 | #include "dm_services.h" | ||
28 | #include "reg_helper.h" | ||
29 | #include "fixed31_32.h" | ||
30 | #include "bios_parser_interface.h" | ||
31 | #include "dc.h" | ||
32 | #include "dmcu.h" | ||
33 | #if defined(CONFIG_DRM_AMD_DC_DCN1_0) | ||
34 | #include "dcn_calcs.h" | ||
35 | #endif | ||
36 | #include "core_types.h" | ||
37 | #include "dc_types.h" | ||
38 | #include "dal_asic_id.h" | ||
39 | |||
40 | #define TO_DCE_CLOCKS(clocks)\ | ||
41 | container_of(clocks, struct dce_dccg, base) | ||
42 | |||
43 | #define REG(reg) \ | ||
44 | (clk_dce->regs->reg) | ||
45 | |||
46 | #undef FN | ||
47 | #define FN(reg_name, field_name) \ | ||
48 | clk_dce->clk_shift->field_name, clk_dce->clk_mask->field_name | ||
49 | |||
50 | #define CTX \ | ||
51 | clk_dce->base.ctx | ||
52 | #define DC_LOGGER \ | ||
53 | clk->ctx->logger | ||
54 | |||
55 | /* Max clock values for each state indexed by "enum clocks_state": */ | ||
56 | static const struct state_dependent_clocks dce80_max_clks_by_state[] = { | ||
57 | /* ClocksStateInvalid - should not be used */ | ||
58 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
59 | /* ClocksStateUltraLow - not expected to be used for DCE 8.0 */ | ||
60 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
61 | /* ClocksStateLow */ | ||
62 | { .display_clk_khz = 352000, .pixel_clk_khz = 330000}, | ||
63 | /* ClocksStateNominal */ | ||
64 | { .display_clk_khz = 600000, .pixel_clk_khz = 400000 }, | ||
65 | /* ClocksStatePerformance */ | ||
66 | { .display_clk_khz = 600000, .pixel_clk_khz = 400000 } }; | ||
67 | |||
68 | static const struct state_dependent_clocks dce110_max_clks_by_state[] = { | ||
69 | /*ClocksStateInvalid - should not be used*/ | ||
70 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
71 | /*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ | ||
72 | { .display_clk_khz = 352000, .pixel_clk_khz = 330000 }, | ||
73 | /*ClocksStateLow*/ | ||
74 | { .display_clk_khz = 352000, .pixel_clk_khz = 330000 }, | ||
75 | /*ClocksStateNominal*/ | ||
76 | { .display_clk_khz = 467000, .pixel_clk_khz = 400000 }, | ||
77 | /*ClocksStatePerformance*/ | ||
78 | { .display_clk_khz = 643000, .pixel_clk_khz = 400000 } }; | ||
79 | |||
80 | static const struct state_dependent_clocks dce112_max_clks_by_state[] = { | ||
81 | /*ClocksStateInvalid - should not be used*/ | ||
82 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
83 | /*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ | ||
84 | { .display_clk_khz = 389189, .pixel_clk_khz = 346672 }, | ||
85 | /*ClocksStateLow*/ | ||
86 | { .display_clk_khz = 459000, .pixel_clk_khz = 400000 }, | ||
87 | /*ClocksStateNominal*/ | ||
88 | { .display_clk_khz = 667000, .pixel_clk_khz = 600000 }, | ||
89 | /*ClocksStatePerformance*/ | ||
90 | { .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } }; | ||
91 | |||
92 | static const struct state_dependent_clocks dce120_max_clks_by_state[] = { | ||
93 | /*ClocksStateInvalid - should not be used*/ | ||
94 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
95 | /*ClocksStateUltraLow - currently by HW design team not supposed to be used*/ | ||
96 | { .display_clk_khz = 0, .pixel_clk_khz = 0 }, | ||
97 | /*ClocksStateLow*/ | ||
98 | { .display_clk_khz = 460000, .pixel_clk_khz = 400000 }, | ||
99 | /*ClocksStateNominal*/ | ||
100 | { .display_clk_khz = 670000, .pixel_clk_khz = 600000 }, | ||
101 | /*ClocksStatePerformance*/ | ||
102 | { .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } }; | ||
103 | |||
104 | /* Starting DID for each range */ | ||
105 | enum dentist_base_divider_id { | ||
106 | DENTIST_BASE_DID_1 = 0x08, | ||
107 | DENTIST_BASE_DID_2 = 0x40, | ||
108 | DENTIST_BASE_DID_3 = 0x60, | ||
109 | DENTIST_BASE_DID_4 = 0x7e, | ||
110 | DENTIST_MAX_DID = 0x7f | ||
111 | }; | ||
112 | |||
113 | /* Starting point and step size for each divider range.*/ | ||
114 | enum dentist_divider_range { | ||
115 | DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */ | ||
116 | DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */ | ||
117 | DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */ | ||
118 | DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */ | ||
119 | DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */ | ||
120 | DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */ | ||
121 | DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */ | ||
122 | DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */ | ||
123 | DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4 | ||
124 | }; | ||
125 | |||
126 | static int dentist_get_divider_from_did(int did) | ||
127 | { | ||
128 | if (did < DENTIST_BASE_DID_1) | ||
129 | did = DENTIST_BASE_DID_1; | ||
130 | if (did > DENTIST_MAX_DID) | ||
131 | did = DENTIST_MAX_DID; | ||
132 | |||
133 | if (did < DENTIST_BASE_DID_2) { | ||
134 | return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP | ||
135 | * (did - DENTIST_BASE_DID_1); | ||
136 | } else if (did < DENTIST_BASE_DID_3) { | ||
137 | return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP | ||
138 | * (did - DENTIST_BASE_DID_2); | ||
139 | } else if (did < DENTIST_BASE_DID_4) { | ||
140 | return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP | ||
141 | * (did - DENTIST_BASE_DID_3); | ||
142 | } else { | ||
143 | return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP | ||
144 | * (did - DENTIST_BASE_DID_4); | ||
145 | } | ||
146 | } | ||
147 | |||
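A quick sanity check of the mapping: a minimal user-space sketch, assuming the two enums and dentist_get_divider_from_did() above are pasted into the same file. Returned values are in quarter units, per DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4.

	#include <assert.h>

	int main(void)
	{
		/* DID 0x08 opens range 1: 8 + 1 * 0 = 8, a real divider of 2.00 */
		assert(dentist_get_divider_from_did(0x08) == 8);
		/* DID 0x41 is one step into range 2: 64 + 2 * 1 = 66, i.e. 16.50 */
		assert(dentist_get_divider_from_did(0x41) == 66);
		/* DID 0x7e opens range 4: 248 + 264 * 0 = 248, i.e. 62.00 */
		assert(dentist_get_divider_from_did(0x7e) == 248);
		/* out-of-range DIDs are clamped into the valid window */
		assert(dentist_get_divider_from_did(0x00) == 8);
		return 0;
	}
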
148 | /* SW will adjust the DP REF clock average value for all purposes | ||
149 | * (DP DTO / DP Audio DTO and DP GTC) if the clock is spread, in all cases: | ||
150 | * - if SS is enabled on the DP Ref clock and HW de-spreading is enabled | ||
151 | *   with SW calculations for DS_INCR/DS_MODULO (planned to be the | ||
152 | *   default case) | ||
153 | * - if SS is enabled on the DP Ref clock and HW de-spreading is enabled | ||
154 | *   with HW calculations (not planned to be used, but the average clock | ||
155 | *   should still be valid) | ||
156 | * - if SS is enabled on the DP Ref clock and HW de-spreading is disabled | ||
157 | *   (should not be the case with CIK) then SW should program all rates | ||
158 | *   generated according to the average value (as with previous ASICs) | ||
159 | */ | ||
160 | static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *clk_dce, int dp_ref_clk_khz) | ||
161 | { | ||
162 | if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) { | ||
163 | struct fixed31_32 ss_percentage = dc_fixpt_div_int( | ||
164 | dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage, | ||
165 | clk_dce->dprefclk_ss_divider), 200); | ||
166 | struct fixed31_32 adj_dp_ref_clk_khz; | ||
167 | |||
168 | ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage); | ||
169 | adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz); | ||
170 | dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz); | ||
171 | } | ||
172 | return dp_ref_clk_khz; | ||
173 | } | ||
174 | |||
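Round numbers make the fixed-point math above easier to follow. A user-space sketch with doubles standing in for fixed31_32; the VBIOS values here are invented for illustration only:

	#include <stdio.h>

	int main(void)
	{
		int dp_ref_clk_khz = 600000;	/* nominal DP ref clock */
		int ss_percentage = 100;	/* assumed raw VBIOS value */
		int ss_divider = 1000;		/* assumed raw VBIOS divider */

		/* adjusted = ref * (1 - pct / (divider * 200)); the divisor of
		 * 200 rather than 100 matches the average of a down-spread,
		 * which sits half the spread amplitude below nominal. */
		double factor = 1.0 - (double)ss_percentage / (ss_divider * 200.0);
		printf("%d kHz\n", (int)(dp_ref_clk_khz * factor));	/* 599700 kHz */
		return 0;
	}
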
175 | static int dce_get_dp_ref_freq_khz(struct dccg *clk) | ||
176 | { | ||
177 | struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk); | ||
178 | int dprefclk_wdivider; | ||
179 | int dprefclk_src_sel; | ||
180 | int dp_ref_clk_khz = 600000; | ||
181 | int target_div; | ||
182 | |||
183 | /* ASSERT DP Reference Clock source is from DFS*/ | ||
184 | REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel); | ||
185 | ASSERT(dprefclk_src_sel == 0); | ||
186 | |||
187 | /* Read the mmDENTIST_DISPCLK_CNTL to get the currently | ||
188 | * programmed DID DENTIST_DPREFCLK_WDIVIDER*/ | ||
189 | REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider); | ||
190 | |||
191 | /* Convert DENTIST_DPREFCLK_WDIVIDER to the actual divider */ | ||
192 | target_div = dentist_get_divider_from_did(dprefclk_wdivider); | ||
193 | |||
194 | /* Calculate the current DFS clock, in kHz.*/ | ||
195 | dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR | ||
196 | * clk_dce->dentist_vco_freq_khz) / target_div; | ||
197 | |||
198 | return dccg_adjust_dp_ref_freq_for_ss(clk_dce, dp_ref_clk_khz); | ||
199 | } | ||
200 | |||
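Worked example: with the 3,600,000 kHz fallback VCO frequency used elsewhere in this file and a WDIVIDER DID that maps to a divider of 24 (a real divider of 6.00 after the scale factor of 4), the DFS clock comes out to 4 * 3,600,000 / 24 = 600,000 kHz, matching the 600 MHz default that dp_ref_clk_khz is initialised to above.
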
201 | static int dce12_get_dp_ref_freq_khz(struct dccg *clk) | ||
202 | { | ||
203 | struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk); | ||
204 | |||
205 | return dccg_adjust_dp_ref_freq_for_ss(clk_dce, clk_dce->dprefclk_khz); | ||
206 | } | ||
207 | |||
208 | static enum dm_pp_clocks_state dce_get_required_clocks_state( | ||
209 | struct dccg *clk, | ||
210 | struct dc_clocks *req_clocks) | ||
211 | { | ||
212 | struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk); | ||
213 | int i; | ||
214 | enum dm_pp_clocks_state low_req_clk; | ||
215 | |||
216 | /* Iterate from highest supported to lowest valid state, and update | ||
217 | * lowest RequiredState with the lowest state that satisfies | ||
218 | * all required clocks | ||
219 | */ | ||
220 | for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--) | ||
221 | if (req_clocks->dispclk_khz > | ||
222 | clk_dce->max_clks_by_state[i].display_clk_khz | ||
223 | || req_clocks->phyclk_khz > | ||
224 | clk_dce->max_clks_by_state[i].pixel_clk_khz) | ||
225 | break; | ||
226 | |||
227 | low_req_clk = i + 1; | ||
228 | if (low_req_clk > clk->max_clks_state) { | ||
229 | /* requirement exceeds the max state: invalid if even the max state's display clock is too low, else clamp to max (high phyclk is tolerated) */ | ||
230 | if (clk_dce->max_clks_by_state[clk->max_clks_state].display_clk_khz | ||
231 | < req_clocks->dispclk_khz) | ||
232 | low_req_clk = DM_PP_CLOCKS_STATE_INVALID; | ||
233 | else | ||
234 | low_req_clk = clk->max_clks_state; | ||
235 | } | ||
236 | |||
237 | return low_req_clk; | ||
238 | } | ||
239 | |||
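Worked example, assuming max_clks_state is ClocksStatePerformance and the dce110 table above: for a requirement of dispclk = 400,000 kHz and phyclk = 300,000 kHz the loop passes Performance (400,000 <= 643,000) and Nominal (400,000 <= 467,000), then breaks at Low (400,000 > 352,000), so low_req_clk = i + 1 lands on ClocksStateNominal.
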
240 | static int dce_set_clock( | ||
241 | struct dccg *clk, | ||
242 | int requested_clk_khz) | ||
243 | { | ||
244 | struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk); | ||
245 | struct bp_pixel_clock_parameters pxl_clk_params = { 0 }; | ||
246 | struct dc_bios *bp = clk->ctx->dc_bios; | ||
247 | int actual_clock = requested_clk_khz; | ||
248 | |||
249 | /* Make sure requested clock isn't lower than minimum threshold*/ | ||
250 | if (requested_clk_khz > 0) | ||
251 | requested_clk_khz = max(requested_clk_khz, | ||
252 | clk_dce->dentist_vco_freq_khz / 64); | ||
253 | |||
254 | /* Prepare to program display clock*/ | ||
255 | pxl_clk_params.target_pixel_clock = requested_clk_khz; | ||
256 | pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; | ||
257 | |||
258 | if (clk_dce->dfs_bypass_active) | ||
259 | pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true; | ||
260 | |||
261 | bp->funcs->program_display_engine_pll(bp, &pxl_clk_params); | ||
262 | |||
263 | if (clk_dce->dfs_bypass_active) { | ||
264 | /* Cache the fixed display clock*/ | ||
265 | clk_dce->dfs_bypass_disp_clk = | ||
266 | pxl_clk_params.dfs_bypass_display_clock; | ||
267 | actual_clock = pxl_clk_params.dfs_bypass_display_clock; | ||
268 | } | ||
269 | |||
270 | /* On power down (HW reset, requested clock == 0) mark the clock state | ||
271 | * as ClocksStateNominal so that on resume we call the pplib voltage regulator. */ | ||
272 | if (requested_clk_khz == 0) | ||
273 | clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; | ||
274 | return actual_clock; | ||
275 | } | ||
276 | |||
277 | static int dce_psr_set_clock( | ||
278 | struct dccg *clk, | ||
279 | int requested_clk_khz) | ||
280 | { | ||
281 | struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk); | ||
282 | struct dc_context *ctx = clk_dce->base.ctx; | ||
283 | struct dc *core_dc = ctx->dc; | ||
284 | struct dmcu *dmcu = core_dc->res_pool->dmcu; | ||
285 | int actual_clk_khz = requested_clk_khz; | ||
286 | |||
287 | actual_clk_khz = dce_set_clock(clk, requested_clk_khz); | ||
288 | |||
289 | dmcu->funcs->set_psr_wait_loop(dmcu, actual_clk_khz / 1000 / 7); | ||
290 | return actual_clk_khz; | ||
291 | } | ||
292 | |||
293 | static int dce112_set_clock( | ||
294 | struct dccg *clk, | ||
295 | int requested_clk_khz) | ||
296 | { | ||
297 | struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk); | ||
298 | struct bp_set_dce_clock_parameters dce_clk_params; | ||
299 | struct dc_bios *bp = clk->ctx->dc_bios; | ||
300 | struct dc *core_dc = clk->ctx->dc; | ||
301 | struct dmcu *dmcu = core_dc->res_pool->dmcu; | ||
302 | int actual_clock = requested_clk_khz; | ||
303 | /* Prepare to program display clock*/ | ||
304 | memset(&dce_clk_params, 0, sizeof(dce_clk_params)); | ||
305 | |||
306 | /* Make sure requested clock isn't lower than minimum threshold*/ | ||
307 | if (requested_clk_khz > 0) | ||
308 | requested_clk_khz = max(requested_clk_khz, | ||
309 | clk_dce->dentist_vco_freq_khz / 62); | ||
310 | |||
311 | dce_clk_params.target_clock_frequency = requested_clk_khz; | ||
312 | dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; | ||
313 | dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK; | ||
314 | |||
315 | bp->funcs->set_dce_clock(bp, &dce_clk_params); | ||
316 | actual_clock = dce_clk_params.target_clock_frequency; | ||
317 | |||
318 | /* On power down (HW reset, requested clock == 0) mark the clock state | ||
319 | * as ClocksStateNominal so that on resume we call the pplib voltage regulator. */ | ||
320 | if (requested_clk_khz == 0) | ||
321 | clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; | ||
322 | |||
323 | /*Program DP ref Clock*/ | ||
324 | /*VBIOS will determine DPREFCLK frequency, so we don't set it*/ | ||
325 | dce_clk_params.target_clock_frequency = 0; | ||
326 | dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK; | ||
327 | if (!ASICREV_IS_VEGA20_P(clk->ctx->asic_id.hw_internal_rev)) | ||
328 | dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = | ||
329 | (dce_clk_params.pll_id == | ||
330 | CLOCK_SOURCE_COMBO_DISPLAY_PLL0); | ||
331 | else | ||
332 | dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false; | ||
333 | |||
334 | bp->funcs->set_dce_clock(bp, &dce_clk_params); | ||
335 | |||
336 | if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { | ||
337 | if (clk_dce->dfs_bypass_disp_clk != actual_clock) | ||
338 | dmcu->funcs->set_psr_wait_loop(dmcu, | ||
339 | actual_clock / 1000 / 7); | ||
340 | } | ||
341 | |||
342 | clk_dce->dfs_bypass_disp_clk = actual_clock; | ||
343 | return actual_clock; | ||
344 | } | ||
345 | |||
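Note the slightly different floors in the two setters: dce_set_clock() above clamps requests to dentist_vco_freq_khz / 64, while this one uses / 62. With the 3,600,000 kHz fallback VCO that is a minimum of 56,250 kHz versus roughly 58,064 kHz.
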
346 | static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce) | ||
347 | { | ||
348 | struct dc_debug_options *debug = &clk_dce->base.ctx->dc->debug; | ||
349 | struct dc_bios *bp = clk_dce->base.ctx->dc_bios; | ||
350 | struct integrated_info info = { { { 0 } } }; | ||
351 | struct dc_firmware_info fw_info = { { 0 } }; | ||
352 | int i; | ||
353 | |||
354 | if (bp->integrated_info) | ||
355 | info = *bp->integrated_info; | ||
356 | |||
357 | clk_dce->dentist_vco_freq_khz = info.dentist_vco_freq; | ||
358 | if (clk_dce->dentist_vco_freq_khz == 0) { | ||
359 | bp->funcs->get_firmware_info(bp, &fw_info); | ||
360 | clk_dce->dentist_vco_freq_khz = | ||
361 | fw_info.smu_gpu_pll_output_freq; | ||
362 | if (clk_dce->dentist_vco_freq_khz == 0) | ||
363 | clk_dce->dentist_vco_freq_khz = 3600000; | ||
364 | } | ||
365 | |||
366 | /*update the maximum display clock for each power state*/ | ||
367 | for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) { | ||
368 | enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID; | ||
369 | |||
370 | switch (i) { | ||
371 | case 0: | ||
372 | clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW; | ||
373 | break; | ||
374 | |||
375 | case 1: | ||
376 | clk_state = DM_PP_CLOCKS_STATE_LOW; | ||
377 | break; | ||
378 | |||
379 | case 2: | ||
380 | clk_state = DM_PP_CLOCKS_STATE_NOMINAL; | ||
381 | break; | ||
382 | |||
383 | case 3: | ||
384 | clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE; | ||
385 | break; | ||
386 | |||
387 | default: | ||
388 | clk_state = DM_PP_CLOCKS_STATE_INVALID; | ||
389 | break; | ||
390 | } | ||
391 | |||
392 | /* Do not allow a bad VBIOS/SBIOS to override with invalid values; | ||
393 | * only accept clocks of at least 100 MHz */ | ||
394 | if (info.disp_clk_voltage[i].max_supported_clk >= 100000) | ||
395 | clk_dce->max_clks_by_state[clk_state].display_clk_khz = | ||
396 | info.disp_clk_voltage[i].max_supported_clk; | ||
397 | } | ||
398 | |||
399 | if (!debug->disable_dfs_bypass && bp->integrated_info) | ||
400 | if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE) | ||
401 | clk_dce->dfs_bypass_enabled = true; | ||
402 | } | ||
403 | |||
404 | static void dce_clock_read_ss_info(struct dce_dccg *clk_dce) | ||
405 | { | ||
406 | struct dc_bios *bp = clk_dce->base.ctx->dc_bios; | ||
407 | int ss_info_num = bp->funcs->get_ss_entry_number( | ||
408 | bp, AS_SIGNAL_TYPE_GPU_PLL); | ||
409 | |||
410 | if (ss_info_num) { | ||
411 | struct spread_spectrum_info info = { { 0 } }; | ||
412 | enum bp_result result = bp->funcs->get_spread_spectrum_info( | ||
413 | bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info); | ||
414 | |||
415 | /* Per VBIOS behavior, the VBIOS keeps an entry for GPU PLL SS | ||
416 | * even if SS is not enabled; in that case | ||
417 | * SSInfo.spreadSpectrumPercentage != 0 is the sign | ||
418 | * that SS is in fact enabled | ||
419 | */ | ||
420 | if (result == BP_RESULT_OK && | ||
421 | info.spread_spectrum_percentage != 0) { | ||
422 | clk_dce->ss_on_dprefclk = true; | ||
423 | clk_dce->dprefclk_ss_divider = info.spread_percentage_divider; | ||
424 | |||
425 | if (info.type.CENTER_MODE == 0) { | ||
426 | /* TODO: Currently for the DP reference clock we | ||
427 | * only need the SS percentage for | ||
428 | * downspread */ | ||
429 | clk_dce->dprefclk_ss_percentage = | ||
430 | info.spread_spectrum_percentage; | ||
431 | } | ||
432 | |||
433 | return; | ||
434 | } | ||
435 | |||
436 | result = bp->funcs->get_spread_spectrum_info( | ||
437 | bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info); | ||
438 | |||
439 | /* Per VBIOS behavior, the VBIOS keeps an entry for DPREFCLK SS | ||
440 | * even if SS is not enabled; in that case | ||
441 | * SSInfo.spreadSpectrumPercentage != 0 is the sign | ||
442 | * that SS is in fact enabled | ||
443 | */ | ||
444 | if (result == BP_RESULT_OK && | ||
445 | info.spread_spectrum_percentage != 0) { | ||
446 | clk_dce->ss_on_dprefclk = true; | ||
447 | clk_dce->dprefclk_ss_divider = info.spread_percentage_divider; | ||
448 | |||
449 | if (info.type.CENTER_MODE == 0) { | ||
450 | /* Currently for the DP reference clock we | ||
451 | * only need the SS percentage for | ||
452 | * downspread */ | ||
453 | clk_dce->dprefclk_ss_percentage = | ||
454 | info.spread_spectrum_percentage; | ||
455 | } | ||
456 | } | ||
457 | } | ||
458 | } | ||
459 | |||
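The lookup is two-stage: the GPU PLL spread-spectrum entry is consulted first, and only if it yields no active spread does the code fall back to the DISPLAY_PORT signal entry. Both paths fill the same ss_on_dprefclk / dprefclk_ss_* fields that dccg_adjust_dp_ref_freq_for_ss() consumes above.
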
460 | static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk) | ||
461 | { | ||
462 | return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk); | ||
463 | } | ||
464 | |||
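In words: a higher computed clock is always applied immediately, while a lower one is applied only when the caller has flagged that lowering is safe, i.e. once nothing still depends on the old, higher value; an equal clock never triggers a reprogram.
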
465 | static void dce12_update_clocks(struct dccg *dccg, | ||
466 | struct dc_clocks *new_clocks, | ||
467 | bool safe_to_lower) | ||
468 | { | ||
469 | struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; | ||
470 | |||
471 | /* TODO: Investigate why this is needed to fix display corruption. */ | ||
472 | new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100; | ||
473 | |||
474 | if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) { | ||
475 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK; | ||
476 | clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz; | ||
477 | new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz); | ||
478 | dccg->clks.dispclk_khz = new_clocks->dispclk_khz; | ||
479 | |||
480 | dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req); | ||
481 | } | ||
482 | |||
483 | if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) { | ||
484 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK; | ||
485 | clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz; | ||
486 | dccg->clks.phyclk_khz = new_clocks->phyclk_khz; | ||
487 | |||
488 | dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req); | ||
489 | } | ||
490 | } | ||
491 | |||
492 | #ifdef CONFIG_DRM_AMD_DC_DCN1_0 | ||
493 | static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks) | ||
494 | { | ||
495 | bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz; | ||
496 | bool dispclk_increase = new_clocks->dispclk_khz > dccg->clks.dispclk_khz; | ||
497 | int disp_clk_threshold = new_clocks->max_supported_dppclk_khz; | ||
498 | bool cur_dpp_div = dccg->clks.dispclk_khz > dccg->clks.dppclk_khz; | ||
499 | |||
500 | /* increasing the clock: the two-step case is current dpp div off (0), requested dpp div on (1) */ | ||
501 | if (dispclk_increase) { | ||
502 | /* already divided by 2, no need to reach target clk with 2 steps*/ | ||
503 | if (cur_dpp_div) | ||
504 | return new_clocks->dispclk_khz; | ||
505 | |||
506 | /* request disp clk is lower than maximum supported dpp clk, | ||
507 | * no need to reach target clk with two steps. | ||
508 | */ | ||
509 | if (new_clocks->dispclk_khz <= disp_clk_threshold) | ||
510 | return new_clocks->dispclk_khz; | ||
511 | |||
512 | /* requested dpp clk is not divided by 2, so dispclk stays within the threshold */ | ||
513 | if (!request_dpp_div) | ||
514 | return new_clocks->dispclk_khz; | ||
515 | |||
516 | } else { | ||
517 | /* decrease clock, looking for current dppclk divided by 2, | ||
518 | * request dppclk not divided by 2. | ||
519 | */ | ||
520 | |||
521 | /* current dpp clk not divided by 2, no need to ramp*/ | ||
522 | if (!cur_dpp_div) | ||
523 | return new_clocks->dispclk_khz; | ||
524 | |||
525 | /* current disp clk is lower than current maximum dpp clk, | ||
526 | * no need to ramp | ||
527 | */ | ||
528 | if (dccg->clks.dispclk_khz <= disp_clk_threshold) | ||
529 | return new_clocks->dispclk_khz; | ||
530 | |||
531 | /* requested dpp clk needs to be divided by 2 */ | ||
532 | if (request_dpp_div) | ||
533 | return new_clocks->dispclk_khz; | ||
534 | } | ||
535 | |||
536 | return disp_clk_threshold; | ||
537 | } | ||
538 | |||
539 | static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *new_clocks) | ||
540 | { | ||
541 | struct dc *dc = dccg->ctx->dc; | ||
542 | int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(dccg, new_clocks); | ||
543 | bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz; | ||
544 | int i; | ||
545 | |||
546 | /* set disp clk to dpp clk threshold */ | ||
547 | dccg->funcs->set_dispclk(dccg, dispclk_to_dpp_threshold); | ||
548 | |||
549 | /* update request dpp clk division option */ | ||
550 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | ||
551 | struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; | ||
552 | |||
553 | if (!pipe_ctx->plane_state) | ||
554 | continue; | ||
555 | |||
556 | pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control( | ||
557 | pipe_ctx->plane_res.dpp, | ||
558 | request_dpp_div, | ||
559 | true); | ||
560 | } | ||
561 | |||
562 | /* If target clk not same as dppclk threshold, set to target clock */ | ||
563 | if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz) | ||
564 | dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz); | ||
565 | |||
566 | dccg->clks.dispclk_khz = new_clocks->dispclk_khz; | ||
567 | dccg->clks.dppclk_khz = new_clocks->dppclk_khz; | ||
568 | dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz; | ||
569 | } | ||
570 | |||
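Concretely, suppose dispclk is currently 300,000 kHz with the DPP divider off, and the new state wants dispclk = 800,000 kHz, dppclk = 400,000 kHz and max_supported_dppclk_khz = 600,000. dcn1_determine_dppclk_threshold() returns 600,000, so the ramp first steps dispclk to 600,000 kHz, flips every active pipe's DPP onto the divided-by-2 clock, and only then raises dispclk to the final 800,000 kHz; dppclk never exceeds its supported maximum along the way.
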
571 | static void dcn1_update_clocks(struct dccg *dccg, | ||
572 | struct dc_clocks *new_clocks, | ||
573 | bool safe_to_lower) | ||
574 | { | ||
575 | struct dc *dc = dccg->ctx->dc; | ||
576 | struct pp_smu_display_requirement_rv *smu_req_cur = | ||
577 | &dc->res_pool->pp_smu_req; | ||
578 | struct pp_smu_display_requirement_rv smu_req = *smu_req_cur; | ||
579 | struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; | ||
580 | struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; | ||
581 | bool send_request_to_increase = false; | ||
582 | bool send_request_to_lower = false; | ||
583 | |||
584 | if (new_clocks->phyclk_khz) | ||
585 | smu_req.display_count = 1; | ||
586 | else | ||
587 | smu_req.display_count = 0; | ||
588 | |||
589 | if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz | ||
590 | || new_clocks->phyclk_khz > dccg->clks.phyclk_khz | ||
591 | || new_clocks->fclk_khz > dccg->clks.fclk_khz | ||
592 | || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz) | ||
593 | send_request_to_increase = true; | ||
594 | |||
595 | if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) { | ||
596 | dccg->clks.phyclk_khz = new_clocks->phyclk_khz; | ||
597 | |||
598 | send_request_to_lower = true; | ||
599 | } | ||
600 | |||
601 | if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) { | ||
602 | dccg->clks.fclk_khz = new_clocks->fclk_khz; | ||
603 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK; | ||
604 | clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz; | ||
605 | smu_req.hard_min_fclk_khz = new_clocks->fclk_khz; | ||
606 | |||
607 | dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req); | ||
608 | send_request_to_lower = true; | ||
609 | } | ||
610 | |||
611 | if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) { | ||
612 | dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz; | ||
613 | smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz; | ||
614 | |||
615 | send_request_to_lower = true; | ||
616 | } | ||
617 | |||
618 | if (should_set_clock(safe_to_lower, | ||
619 | new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) { | ||
620 | dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; | ||
621 | smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz; | ||
622 | |||
623 | send_request_to_lower = true; | ||
624 | } | ||
625 | |||
626 | /* make sure dcfclk is raised before dppclk so that | ||
627 | * we have enough voltage to run dppclk | ||
628 | */ | ||
629 | if (send_request_to_increase) { | ||
630 | /*use dcfclk to request voltage*/ | ||
631 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK; | ||
632 | clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); | ||
633 | dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req); | ||
634 | if (pp_smu->set_display_requirement) | ||
635 | pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); | ||
636 | } | ||
637 | |||
638 | /* dcn1 dppclk is tied to dispclk */ | ||
639 | /* program dispclk even when unchanged (==) as a w/a for sleep/resume clock ramping issues */ | ||
640 | if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz) | ||
641 | || new_clocks->dispclk_khz == dccg->clks.dispclk_khz) { | ||
642 | dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks); | ||
643 | dccg->clks.dispclk_khz = new_clocks->dispclk_khz; | ||
644 | |||
645 | send_request_to_lower = true; | ||
646 | } | ||
647 | |||
648 | if (!send_request_to_increase && send_request_to_lower) { | ||
649 | /*use dcfclk to request voltage*/ | ||
650 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK; | ||
651 | clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); | ||
652 | dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req); | ||
653 | if (pp_smu->set_display_requirement) | ||
654 | pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); | ||
655 | } | ||
656 | |||
657 | |||
658 | *smu_req_cur = smu_req; | ||
659 | } | ||
660 | #endif | ||
661 | |||
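Note the ordering dcn1_update_clocks() enforces around the clock programming itself: if anything is going up, the DCFCLK-based voltage request (and the SMU display requirement) is sent before dispclk/dppclk are ramped; if everything is going down or staying put, it is sent afterwards. Either way, the voltage level is sufficient for whichever set of clocks is live at any instant.
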
662 | static void dce_update_clocks(struct dccg *dccg, | ||
663 | struct dc_clocks *new_clocks, | ||
664 | bool safe_to_lower) | ||
665 | { | ||
666 | struct dm_pp_power_level_change_request level_change_req; | ||
667 | struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg); | ||
668 | |||
669 | /* TODO: Investigate why this is needed to fix display corruption. */ | ||
670 | if (!clk_dce->dfs_bypass_active) | ||
671 | new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100; | ||
672 | |||
673 | level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks); | ||
674 | /* get max clock state from PPLIB */ | ||
675 | if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower) | ||
676 | || level_change_req.power_level > dccg->cur_min_clks_state) { | ||
677 | if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req)) | ||
678 | dccg->cur_min_clks_state = level_change_req.power_level; | ||
679 | } | ||
680 | |||
681 | if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) { | ||
682 | new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz); | ||
683 | dccg->clks.dispclk_khz = new_clocks->dispclk_khz; | ||
684 | } | ||
685 | } | ||
686 | |||
687 | static bool dce_update_dfs_bypass( | ||
688 | struct dccg *dccg, | ||
689 | struct dc *dc, | ||
690 | struct dc_state *context, | ||
691 | int requested_clock_khz) | ||
692 | { | ||
693 | struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg); | ||
694 | struct resource_context *res_ctx = &context->res_ctx; | ||
695 | enum signal_type signal_type = SIGNAL_TYPE_NONE; | ||
696 | bool was_active = clk_dce->dfs_bypass_active; | ||
697 | int i; | ||
698 | |||
699 | /* Disable DFS bypass by default. */ | ||
700 | clk_dce->dfs_bypass_active = false; | ||
701 | |||
702 | /* Check that DFS bypass is available. */ | ||
703 | if (!clk_dce->dfs_bypass_enabled) | ||
704 | goto update; | ||
705 | |||
706 | /* Check if the requested display clock is below the threshold. */ | ||
707 | if (requested_clock_khz >= 400000) | ||
708 | goto update; | ||
709 | |||
710 | /* DFS-bypass should only be enabled on single stream setups */ | ||
711 | if (context->stream_count != 1) | ||
712 | goto update; | ||
713 | |||
714 | /* Check that the stream's signal type is an embedded panel */ | ||
715 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | ||
716 | if (res_ctx->pipe_ctx[i].stream) { | ||
717 | struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i]; | ||
718 | |||
719 | signal_type = pipe_ctx->stream->sink->link->connector_signal; | ||
720 | break; | ||
721 | } | ||
722 | } | ||
723 | |||
724 | if (signal_type == SIGNAL_TYPE_EDP || | ||
725 | signal_type == SIGNAL_TYPE_LVDS) | ||
726 | clk_dce->dfs_bypass_active = true; | ||
727 | |||
728 | update: | ||
729 | /* Update the clock state. We don't need to respect safe_to_lower | ||
730 | * because DFS bypass should always be greater than the current | ||
731 | * display clock frequency. | ||
732 | */ | ||
733 | if (was_active != clk_dce->dfs_bypass_active) { | ||
734 | dccg->clks.dispclk_khz = | ||
735 | dccg->funcs->set_dispclk(dccg, dccg->clks.dispclk_khz); | ||
736 | return true; | ||
737 | } | ||
738 | |||
739 | return false; | ||
740 | } | ||
741 | |||
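DFS bypass therefore ends up active only when all four gates pass: the feature is enabled for the ASIC, the requested display clock is below 400 MHz, exactly one stream is present, and that stream drives an embedded panel (eDP or LVDS). Any change in the active state forces a dispclk reprogram so the bypass takes, or releases, effect immediately.
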
742 | #ifdef CONFIG_DRM_AMD_DC_DCN1_0 | ||
743 | static const struct display_clock_funcs dcn1_funcs = { | ||
744 | .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, | ||
745 | .set_dispclk = dce112_set_clock, | ||
746 | .update_clocks = dcn1_update_clocks | ||
747 | }; | ||
748 | #endif | ||
749 | |||
750 | static const struct display_clock_funcs dce120_funcs = { | ||
751 | .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, | ||
752 | .set_dispclk = dce112_set_clock, | ||
753 | .update_clocks = dce12_update_clocks | ||
754 | }; | ||
755 | |||
756 | static const struct display_clock_funcs dce112_funcs = { | ||
757 | .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, | ||
758 | .set_dispclk = dce112_set_clock, | ||
759 | .update_clocks = dce_update_clocks | ||
760 | }; | ||
761 | |||
762 | static const struct display_clock_funcs dce110_funcs = { | ||
763 | .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, | ||
764 | .set_dispclk = dce_psr_set_clock, | ||
765 | .update_clocks = dce_update_clocks, | ||
766 | .update_dfs_bypass = dce_update_dfs_bypass | ||
767 | }; | ||
768 | |||
769 | static const struct display_clock_funcs dce_funcs = { | ||
770 | .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz, | ||
771 | .set_dispclk = dce_set_clock, | ||
772 | .update_clocks = dce_update_clocks | ||
773 | }; | ||
774 | |||
775 | static void dce_dccg_construct( | ||
776 | struct dce_dccg *clk_dce, | ||
777 | struct dc_context *ctx, | ||
778 | const struct dccg_registers *regs, | ||
779 | const struct dccg_shift *clk_shift, | ||
780 | const struct dccg_mask *clk_mask) | ||
781 | { | ||
782 | struct dccg *base = &clk_dce->base; | ||
783 | |||
784 | base->ctx = ctx; | ||
785 | base->funcs = &dce_funcs; | ||
786 | |||
787 | clk_dce->regs = regs; | ||
788 | clk_dce->clk_shift = clk_shift; | ||
789 | clk_dce->clk_mask = clk_mask; | ||
790 | |||
791 | clk_dce->dfs_bypass_disp_clk = 0; | ||
792 | |||
793 | clk_dce->dprefclk_ss_percentage = 0; | ||
794 | clk_dce->dprefclk_ss_divider = 1000; | ||
795 | clk_dce->ss_on_dprefclk = false; | ||
796 | |||
797 | base->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL; | ||
798 | base->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID; | ||
799 | |||
800 | dce_clock_read_integrated_info(clk_dce); | ||
801 | dce_clock_read_ss_info(clk_dce); | ||
802 | } | ||
803 | |||
804 | struct dccg *dce_dccg_create( | ||
805 | struct dc_context *ctx, | ||
806 | const struct dccg_registers *regs, | ||
807 | const struct dccg_shift *clk_shift, | ||
808 | const struct dccg_mask *clk_mask) | ||
809 | { | ||
810 | struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL); | ||
811 | |||
812 | if (clk_dce == NULL) { | ||
813 | BREAK_TO_DEBUGGER(); | ||
814 | return NULL; | ||
815 | } | ||
816 | |||
817 | memcpy(clk_dce->max_clks_by_state, | ||
818 | dce80_max_clks_by_state, | ||
819 | sizeof(dce80_max_clks_by_state)); | ||
820 | |||
821 | dce_dccg_construct( | ||
822 | clk_dce, ctx, regs, clk_shift, clk_mask); | ||
823 | |||
824 | return &clk_dce->base; | ||
825 | } | ||
826 | |||
827 | struct dccg *dce110_dccg_create( | ||
828 | struct dc_context *ctx, | ||
829 | const struct dccg_registers *regs, | ||
830 | const struct dccg_shift *clk_shift, | ||
831 | const struct dccg_mask *clk_mask) | ||
832 | { | ||
833 | struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL); | ||
834 | |||
835 | if (clk_dce == NULL) { | ||
836 | BREAK_TO_DEBUGGER(); | ||
837 | return NULL; | ||
838 | } | ||
839 | |||
840 | memcpy(clk_dce->max_clks_by_state, | ||
841 | dce110_max_clks_by_state, | ||
842 | sizeof(dce110_max_clks_by_state)); | ||
843 | |||
844 | dce_dccg_construct( | ||
845 | clk_dce, ctx, regs, clk_shift, clk_mask); | ||
846 | |||
847 | clk_dce->base.funcs = &dce110_funcs; | ||
848 | |||
849 | return &clk_dce->base; | ||
850 | } | ||
851 | |||
852 | struct dccg *dce112_dccg_create( | ||
853 | struct dc_context *ctx, | ||
854 | const struct dccg_registers *regs, | ||
855 | const struct dccg_shift *clk_shift, | ||
856 | const struct dccg_mask *clk_mask) | ||
857 | { | ||
858 | struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL); | ||
859 | |||
860 | if (clk_dce == NULL) { | ||
861 | BREAK_TO_DEBUGGER(); | ||
862 | return NULL; | ||
863 | } | ||
864 | |||
865 | memcpy(clk_dce->max_clks_by_state, | ||
866 | dce112_max_clks_by_state, | ||
867 | sizeof(dce112_max_clks_by_state)); | ||
868 | |||
869 | dce_dccg_construct( | ||
870 | clk_dce, ctx, regs, clk_shift, clk_mask); | ||
871 | |||
872 | clk_dce->base.funcs = &dce112_funcs; | ||
873 | |||
874 | return &clk_dce->base; | ||
875 | } | ||
876 | |||
877 | struct dccg *dce120_dccg_create(struct dc_context *ctx) | ||
878 | { | ||
879 | struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL); | ||
880 | |||
881 | if (clk_dce == NULL) { | ||
882 | BREAK_TO_DEBUGGER(); | ||
883 | return NULL; | ||
884 | } | ||
885 | |||
886 | memcpy(clk_dce->max_clks_by_state, | ||
887 | dce120_max_clks_by_state, | ||
888 | sizeof(dce120_max_clks_by_state)); | ||
889 | |||
890 | dce_dccg_construct( | ||
891 | clk_dce, ctx, NULL, NULL, NULL); | ||
892 | |||
893 | clk_dce->dprefclk_khz = 600000; | ||
894 | clk_dce->base.funcs = &dce120_funcs; | ||
895 | |||
896 | return &clk_dce->base; | ||
897 | } | ||
898 | |||
899 | #ifdef CONFIG_DRM_AMD_DC_DCN1_0 | ||
900 | struct dccg *dcn1_dccg_create(struct dc_context *ctx) | ||
901 | { | ||
902 | struct dc_debug_options *debug = &ctx->dc->debug; | ||
903 | struct dc_bios *bp = ctx->dc_bios; | ||
904 | struct dc_firmware_info fw_info = { { 0 } }; | ||
905 | struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL); | ||
906 | |||
907 | if (clk_dce == NULL) { | ||
908 | BREAK_TO_DEBUGGER(); | ||
909 | return NULL; | ||
910 | } | ||
911 | |||
912 | clk_dce->base.ctx = ctx; | ||
913 | clk_dce->base.funcs = &dcn1_funcs; | ||
914 | |||
915 | clk_dce->dfs_bypass_disp_clk = 0; | ||
916 | |||
917 | clk_dce->dprefclk_ss_percentage = 0; | ||
918 | clk_dce->dprefclk_ss_divider = 1000; | ||
919 | clk_dce->ss_on_dprefclk = false; | ||
920 | |||
921 | clk_dce->dprefclk_khz = 600000; | ||
922 | if (bp->integrated_info) | ||
923 | clk_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; | ||
924 | if (clk_dce->dentist_vco_freq_khz == 0) { | ||
925 | bp->funcs->get_firmware_info(bp, &fw_info); | ||
926 | clk_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq; | ||
927 | if (clk_dce->dentist_vco_freq_khz == 0) | ||
928 | clk_dce->dentist_vco_freq_khz = 3600000; | ||
929 | } | ||
930 | |||
931 | if (!debug->disable_dfs_bypass && bp->integrated_info) | ||
932 | if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE) | ||
933 | clk_dce->dfs_bypass_enabled = true; | ||
934 | |||
935 | dce_clock_read_ss_info(clk_dce); | ||
936 | |||
937 | return &clk_dce->base; | ||
938 | } | ||
939 | #endif | ||
940 | |||
941 | void dce_dccg_destroy(struct dccg **dccg) | ||
942 | { | ||
943 | struct dce_dccg *clk_dce = TO_DCE_CLOCKS(*dccg); | ||
944 | |||
945 | kfree(clk_dce); | ||
946 | *dccg = NULL; | ||
947 | } | ||
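The constructors above all follow the same kzalloc/construct shape, and the destroy helper takes a struct dccg ** so it can NULL the caller's pointer. A sketch of the intended pairing; the register-table identifiers below are placeholders for the per-ASIC tables that resource pools define (compare disp_clk_regs in dce100_resource.c further down):

	static const struct dccg_registers my_regs;	/* placeholder */
	static const struct dccg_shift my_shift;	/* placeholder */
	static const struct dccg_mask my_mask;		/* placeholder */

	static bool my_pool_create_display_clock(struct dc_context *ctx,
						 struct dccg **out)
	{
		*out = dce110_dccg_create(ctx, &my_regs, &my_shift, &my_mask);
		return *out != NULL;	/* on failure BREAK_TO_DEBUGGER() already fired */
	}

	static void my_pool_destroy_display_clock(struct dccg **dccg)
	{
		if (*dccg != NULL)
			dce_dccg_destroy(dccg);	/* frees and NULLs the pointer */
	}
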
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index 64dc75378541..c83a7f05f14c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | |||
@@ -233,6 +233,16 @@ struct dce_hwseq_registers { | |||
233 | uint32_t DOMAIN5_PG_CONFIG; | 233 | uint32_t DOMAIN5_PG_CONFIG; |
234 | uint32_t DOMAIN6_PG_CONFIG; | 234 | uint32_t DOMAIN6_PG_CONFIG; |
235 | uint32_t DOMAIN7_PG_CONFIG; | 235 | uint32_t DOMAIN7_PG_CONFIG; |
236 | uint32_t DOMAIN8_PG_CONFIG; | ||
237 | uint32_t DOMAIN9_PG_CONFIG; | ||
238 | uint32_t DOMAIN10_PG_CONFIG; | ||
239 | uint32_t DOMAIN11_PG_CONFIG; | ||
240 | uint32_t DOMAIN16_PG_CONFIG; | ||
241 | uint32_t DOMAIN17_PG_CONFIG; | ||
242 | uint32_t DOMAIN18_PG_CONFIG; | ||
243 | uint32_t DOMAIN19_PG_CONFIG; | ||
244 | uint32_t DOMAIN20_PG_CONFIG; | ||
245 | uint32_t DOMAIN21_PG_CONFIG; | ||
236 | uint32_t DOMAIN0_PG_STATUS; | 246 | uint32_t DOMAIN0_PG_STATUS; |
237 | uint32_t DOMAIN1_PG_STATUS; | 247 | uint32_t DOMAIN1_PG_STATUS; |
238 | uint32_t DOMAIN2_PG_STATUS; | 248 | uint32_t DOMAIN2_PG_STATUS; |
@@ -241,6 +251,16 @@ struct dce_hwseq_registers { | |||
241 | uint32_t DOMAIN5_PG_STATUS; | 251 | uint32_t DOMAIN5_PG_STATUS; |
242 | uint32_t DOMAIN6_PG_STATUS; | 252 | uint32_t DOMAIN6_PG_STATUS; |
243 | uint32_t DOMAIN7_PG_STATUS; | 253 | uint32_t DOMAIN7_PG_STATUS; |
254 | uint32_t DOMAIN8_PG_STATUS; | ||
255 | uint32_t DOMAIN9_PG_STATUS; | ||
256 | uint32_t DOMAIN10_PG_STATUS; | ||
257 | uint32_t DOMAIN11_PG_STATUS; | ||
258 | uint32_t DOMAIN16_PG_STATUS; | ||
259 | uint32_t DOMAIN17_PG_STATUS; | ||
260 | uint32_t DOMAIN18_PG_STATUS; | ||
261 | uint32_t DOMAIN19_PG_STATUS; | ||
262 | uint32_t DOMAIN20_PG_STATUS; | ||
263 | uint32_t DOMAIN21_PG_STATUS; | ||
244 | uint32_t DIO_MEM_PWR_CTRL; | 264 | uint32_t DIO_MEM_PWR_CTRL; |
245 | uint32_t DCCG_GATE_DISABLE_CNTL; | 265 | uint32_t DCCG_GATE_DISABLE_CNTL; |
246 | uint32_t DCCG_GATE_DISABLE_CNTL2; | 266 | uint32_t DCCG_GATE_DISABLE_CNTL2; |
@@ -262,6 +282,8 @@ struct dce_hwseq_registers { | |||
262 | uint32_t D2VGA_CONTROL; | 282 | uint32_t D2VGA_CONTROL; |
263 | uint32_t D3VGA_CONTROL; | 283 | uint32_t D3VGA_CONTROL; |
264 | uint32_t D4VGA_CONTROL; | 284 | uint32_t D4VGA_CONTROL; |
285 | uint32_t D5VGA_CONTROL; | ||
286 | uint32_t D6VGA_CONTROL; | ||
265 | uint32_t VGA_TEST_CONTROL; | 287 | uint32_t VGA_TEST_CONTROL; |
266 | /* MMHUB registers. read only. temporary hack */ | 288 | /* MMHUB registers. read only. temporary hack */ |
267 | uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32; | 289 | uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32; |
@@ -489,6 +511,26 @@ struct dce_hwseq_registers { | |||
489 | type DOMAIN6_POWER_GATE; \ | 511 | type DOMAIN6_POWER_GATE; \ |
490 | type DOMAIN7_POWER_FORCEON; \ | 512 | type DOMAIN7_POWER_FORCEON; \ |
491 | type DOMAIN7_POWER_GATE; \ | 513 | type DOMAIN7_POWER_GATE; \ |
514 | type DOMAIN8_POWER_FORCEON; \ | ||
515 | type DOMAIN8_POWER_GATE; \ | ||
516 | type DOMAIN9_POWER_FORCEON; \ | ||
517 | type DOMAIN9_POWER_GATE; \ | ||
518 | type DOMAIN10_POWER_FORCEON; \ | ||
519 | type DOMAIN10_POWER_GATE; \ | ||
520 | type DOMAIN11_POWER_FORCEON; \ | ||
521 | type DOMAIN11_POWER_GATE; \ | ||
522 | type DOMAIN16_POWER_FORCEON; \ | ||
523 | type DOMAIN16_POWER_GATE; \ | ||
524 | type DOMAIN17_POWER_FORCEON; \ | ||
525 | type DOMAIN17_POWER_GATE; \ | ||
526 | type DOMAIN18_POWER_FORCEON; \ | ||
527 | type DOMAIN18_POWER_GATE; \ | ||
528 | type DOMAIN19_POWER_FORCEON; \ | ||
529 | type DOMAIN19_POWER_GATE; \ | ||
530 | type DOMAIN20_POWER_FORCEON; \ | ||
531 | type DOMAIN20_POWER_GATE; \ | ||
532 | type DOMAIN21_POWER_FORCEON; \ | ||
533 | type DOMAIN21_POWER_GATE; \ | ||
492 | type DOMAIN0_PGFSM_PWR_STATUS; \ | 534 | type DOMAIN0_PGFSM_PWR_STATUS; \ |
493 | type DOMAIN1_PGFSM_PWR_STATUS; \ | 535 | type DOMAIN1_PGFSM_PWR_STATUS; \ |
494 | type DOMAIN2_PGFSM_PWR_STATUS; \ | 536 | type DOMAIN2_PGFSM_PWR_STATUS; \ |
@@ -497,6 +539,16 @@ struct dce_hwseq_registers { | |||
497 | type DOMAIN5_PGFSM_PWR_STATUS; \ | 539 | type DOMAIN5_PGFSM_PWR_STATUS; \ |
498 | type DOMAIN6_PGFSM_PWR_STATUS; \ | 540 | type DOMAIN6_PGFSM_PWR_STATUS; \ |
499 | type DOMAIN7_PGFSM_PWR_STATUS; \ | 541 | type DOMAIN7_PGFSM_PWR_STATUS; \ |
542 | type DOMAIN8_PGFSM_PWR_STATUS; \ | ||
543 | type DOMAIN9_PGFSM_PWR_STATUS; \ | ||
544 | type DOMAIN10_PGFSM_PWR_STATUS; \ | ||
545 | type DOMAIN11_PGFSM_PWR_STATUS; \ | ||
546 | type DOMAIN16_PGFSM_PWR_STATUS; \ | ||
547 | type DOMAIN17_PGFSM_PWR_STATUS; \ | ||
548 | type DOMAIN18_PGFSM_PWR_STATUS; \ | ||
549 | type DOMAIN19_PGFSM_PWR_STATUS; \ | ||
550 | type DOMAIN20_PGFSM_PWR_STATUS; \ | ||
551 | type DOMAIN21_PGFSM_PWR_STATUS; \ | ||
500 | type DCFCLK_GATE_DIS; \ | 552 | type DCFCLK_GATE_DIS; \ |
501 | type DCHUBBUB_GLOBAL_TIMER_REFDIV; \ | 553 | type DCHUBBUB_GLOBAL_TIMER_REFDIV; \ |
502 | type VGA_TEST_ENABLE; \ | 554 | type VGA_TEST_ENABLE; \ |
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 366bc8c2c643..3e18ea84b1f9 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | |||
@@ -645,7 +645,7 @@ static bool dce110_link_encoder_validate_hdmi_output( | |||
645 | return false; | 645 | return false; |
646 | 646 | ||
647 | /* DCE11 HW does not support 420 */ | 647 | /* DCE11 HW does not support 420 */ |
648 | if (!enc110->base.features.ycbcr420_supported && | 648 | if (!enc110->base.features.hdmi_ycbcr420_supported && |
649 | crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) | 649 | crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) |
650 | return false; | 650 | return false; |
651 | 651 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c index 74c05e878807..bc50a8e25f4f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c | |||
@@ -105,74 +105,18 @@ bool dce100_enable_display_power_gating( | |||
105 | return false; | 105 | return false; |
106 | } | 106 | } |
107 | 107 | ||
108 | static void dce100_pplib_apply_display_requirements( | 108 | void dce100_prepare_bandwidth( |
109 | struct dc *dc, | ||
110 | struct dc_state *context) | ||
111 | { | ||
112 | struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; | ||
113 | |||
114 | pp_display_cfg->avail_mclk_switch_time_us = | ||
115 | dce110_get_min_vblank_time_us(context); | ||
116 | /*pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz | ||
117 | / MEMORY_TYPE_MULTIPLIER;*/ | ||
118 | |||
119 | dce110_fill_display_configs(context, pp_display_cfg); | ||
120 | |||
121 | if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof( | ||
122 | struct dm_pp_display_configuration)) != 0) | ||
123 | dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); | ||
124 | |||
125 | dc->prev_display_config = *pp_display_cfg; | ||
126 | } | ||
127 | |||
128 | /* unit: in_khz before mode set, get pixel clock from context. ASIC register | ||
129 | * may not be programmed yet | ||
130 | */ | ||
131 | static uint32_t get_max_pixel_clock_for_all_paths( | ||
132 | struct dc *dc, | ||
133 | struct dc_state *context) | ||
134 | { | ||
135 | uint32_t max_pix_clk = 0; | ||
136 | int i; | ||
137 | |||
138 | for (i = 0; i < MAX_PIPES; i++) { | ||
139 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | ||
140 | |||
141 | if (pipe_ctx->stream == NULL) | ||
142 | continue; | ||
143 | |||
144 | /* do not check under lay */ | ||
145 | if (pipe_ctx->top_pipe) | ||
146 | continue; | ||
147 | |||
148 | if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk) | ||
149 | max_pix_clk = | ||
150 | pipe_ctx->stream_res.pix_clk_params.requested_pix_clk; | ||
151 | } | ||
152 | return max_pix_clk; | ||
153 | } | ||
154 | |||
155 | void dce100_set_bandwidth( | ||
156 | struct dc *dc, | 109 | struct dc *dc, |
157 | struct dc_state *context, | 110 | struct dc_state *context) |
158 | bool decrease_allowed) | ||
159 | { | 111 | { |
160 | struct dc_clocks req_clks; | ||
161 | |||
162 | req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100; | ||
163 | req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context); | ||
164 | |||
165 | dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool); | 112 | dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool); |
166 | 113 | ||
167 | dc->res_pool->dccg->funcs->update_clocks( | 114 | dc->res_pool->clk_mgr->funcs->update_clocks( |
168 | dc->res_pool->dccg, | 115 | dc->res_pool->clk_mgr, |
169 | &req_clks, | 116 | context, |
170 | decrease_allowed); | 117 | false); |
171 | |||
172 | dce100_pplib_apply_display_requirements(dc, context); | ||
173 | } | 118 | } |
174 | 119 | ||
175 | |||
176 | /**************************************************************************/ | 120 | /**************************************************************************/ |
177 | 121 | ||
178 | void dce100_hw_sequencer_construct(struct dc *dc) | 122 | void dce100_hw_sequencer_construct(struct dc *dc) |
@@ -180,8 +124,7 @@ void dce100_hw_sequencer_construct(struct dc *dc) | |||
180 | dce110_hw_sequencer_construct(dc); | 124 | dce110_hw_sequencer_construct(dc); |
181 | 125 | ||
182 | dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; | 126 | dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; |
183 | dc->hwss.set_bandwidth = dce100_set_bandwidth; | 127 | dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; |
184 | dc->hwss.pplib_apply_display_requirements = | 128 | dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth; |
185 | dce100_pplib_apply_display_requirements; | ||
186 | } | 129 | } |
187 | 130 | ||
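With this change DCE 10 routes both bandwidth hooks through one function: prepare_bandwidth and optimize_bandwidth each end up in the clock manager's update_clocks() with safe_to_lower hardcoded to false, so this path only ever raises clocks, and the old pplib display-requirement plumbing (together with the per-path max-pixel-clock scan it needed) drops out of the file.
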
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h index c6ec0ed6ec3d..acd418515346 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h | |||
@@ -33,10 +33,9 @@ struct dc_state; | |||
33 | 33 | ||
34 | void dce100_hw_sequencer_construct(struct dc *dc); | 34 | void dce100_hw_sequencer_construct(struct dc *dc); |
35 | 35 | ||
36 | void dce100_set_bandwidth( | 36 | void dce100_prepare_bandwidth( |
37 | struct dc *dc, | 37 | struct dc *dc, |
38 | struct dc_state *context, | 38 | struct dc_state *context); |
39 | bool decrease_allowed); | ||
40 | 39 | ||
41 | bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id, | 40 | bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id, |
42 | struct dc_bios *dcb, | 41 | struct dc_bios *dcb, |
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 14754a87156c..6ae51a5dfc04 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | |||
@@ -36,11 +36,11 @@ | |||
36 | #include "dce/dce_link_encoder.h" | 36 | #include "dce/dce_link_encoder.h" |
37 | #include "dce/dce_stream_encoder.h" | 37 | #include "dce/dce_stream_encoder.h" |
38 | 38 | ||
39 | #include "dce/dce_clk_mgr.h" | ||
39 | #include "dce/dce_mem_input.h" | 40 | #include "dce/dce_mem_input.h" |
40 | #include "dce/dce_ipp.h" | 41 | #include "dce/dce_ipp.h" |
41 | #include "dce/dce_transform.h" | 42 | #include "dce/dce_transform.h" |
42 | #include "dce/dce_opp.h" | 43 | #include "dce/dce_opp.h" |
43 | #include "dce/dce_clocks.h" | ||
44 | #include "dce/dce_clock_source.h" | 44 | #include "dce/dce_clock_source.h" |
45 | #include "dce/dce_audio.h" | 45 | #include "dce/dce_audio.h" |
46 | #include "dce/dce_hwseq.h" | 46 | #include "dce/dce_hwseq.h" |
@@ -137,15 +137,15 @@ static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = { | |||
137 | .reg_name = mm ## block ## id ## _ ## reg_name | 137 | .reg_name = mm ## block ## id ## _ ## reg_name |
138 | 138 | ||
139 | 139 | ||
140 | static const struct dccg_registers disp_clk_regs = { | 140 | static const struct clk_mgr_registers disp_clk_regs = { |
141 | CLK_COMMON_REG_LIST_DCE_BASE() | 141 | CLK_COMMON_REG_LIST_DCE_BASE() |
142 | }; | 142 | }; |
143 | 143 | ||
144 | static const struct dccg_shift disp_clk_shift = { | 144 | static const struct clk_mgr_shift disp_clk_shift = { |
145 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) | 145 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) |
146 | }; | 146 | }; |
147 | 147 | ||
148 | static const struct dccg_mask disp_clk_mask = { | 148 | static const struct clk_mgr_mask disp_clk_mask = { |
149 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) | 149 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) |
150 | }; | 150 | }; |
151 | 151 | ||
@@ -722,8 +722,8 @@ static void destruct(struct dce110_resource_pool *pool) | |||
722 | dce_aud_destroy(&pool->base.audios[i]); | 722 | dce_aud_destroy(&pool->base.audios[i]); |
723 | } | 723 | } |
724 | 724 | ||
725 | if (pool->base.dccg != NULL) | 725 | if (pool->base.clk_mgr != NULL) |
726 | dce_dccg_destroy(&pool->base.dccg); | 726 | dce_clk_mgr_destroy(&pool->base.clk_mgr); |
727 | 727 | ||
728 | if (pool->base.abm != NULL) | 728 | if (pool->base.abm != NULL) |
729 | dce_abm_destroy(&pool->base.abm); | 729 | dce_abm_destroy(&pool->base.abm); |
@@ -767,7 +767,7 @@ bool dce100_validate_bandwidth( | |||
767 | if (at_least_one_pipe) { | 767 | if (at_least_one_pipe) { |
768 | /* TODO implement when needed but for now hardcode max value*/ | 768 | /* TODO implement when needed but for now hardcode max value*/ |
769 | context->bw.dce.dispclk_khz = 681000; | 769 | context->bw.dce.dispclk_khz = 681000; |
770 | context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER; | 770 | context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; |
771 | } else { | 771 | } else { |
772 | context->bw.dce.dispclk_khz = 0; | 772 | context->bw.dce.dispclk_khz = 0; |
773 | context->bw.dce.yclk_khz = 0; | 773 | context->bw.dce.yclk_khz = 0; |
@@ -860,7 +860,6 @@ static bool construct( | |||
860 | struct dc_context *ctx = dc->ctx; | 860 | struct dc_context *ctx = dc->ctx; |
861 | struct dc_firmware_info info; | 861 | struct dc_firmware_info info; |
862 | struct dc_bios *bp; | 862 | struct dc_bios *bp; |
863 | struct dm_pp_static_clock_info static_clk_info = {0}; | ||
864 | 863 | ||
865 | ctx->dc_bios->regs = &bios_regs; | 864 | ctx->dc_bios->regs = &bios_regs; |
866 | 865 | ||
@@ -908,11 +907,11 @@ static bool construct( | |||
908 | } | 907 | } |
909 | } | 908 | } |
910 | 909 | ||
911 | pool->base.dccg = dce_dccg_create(ctx, | 910 | pool->base.clk_mgr = dce_clk_mgr_create(ctx, |
912 | &disp_clk_regs, | 911 | &disp_clk_regs, |
913 | &disp_clk_shift, | 912 | &disp_clk_shift, |
914 | &disp_clk_mask); | 913 | &disp_clk_mask); |
915 | if (pool->base.dccg == NULL) { | 914 | if (pool->base.clk_mgr == NULL) { |
916 | dm_error("DC: failed to create display clock!\n"); | 915 | dm_error("DC: failed to create display clock!\n"); |
917 | BREAK_TO_DEBUGGER(); | 916 | BREAK_TO_DEBUGGER(); |
918 | goto res_create_fail; | 917 | goto res_create_fail; |
@@ -938,12 +937,6 @@ static bool construct( | |||
938 | goto res_create_fail; | 937 | goto res_create_fail; |
939 | } | 938 | } |
940 | 939 | ||
941 | /* get static clock information for PPLIB or firmware, save | ||
942 | * max_clock_state | ||
943 | */ | ||
944 | if (dm_pp_get_static_clocks(ctx, &static_clk_info)) | ||
945 | pool->base.dccg->max_clks_state = | ||
946 | static_clk_info.max_clocks_state; | ||
947 | { | 940 | { |
948 | struct irq_service_init_data init_data; | 941 | struct irq_service_init_data init_data; |
949 | init_data.ctx = dc->ctx; | 942 | init_data.ctx = dc->ctx; |
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index b75ede5f84f7..9724a17e352b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | |||
@@ -548,14 +548,14 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf, | |||
548 | 548 | ||
549 | regamma_params->hw_points_num = hw_points; | 549 | regamma_params->hw_points_num = hw_points; |
550 | 550 | ||
551 | i = 1; | 551 | k = 0; |
552 | for (k = 0; k < 16 && i < 16; k++) { | 552 | for (i = 1; i < 16; i++) { |
553 | if (seg_distr[k] != -1) { | 553 | if (seg_distr[k] != -1) { |
554 | regamma_params->arr_curve_points[k].segments_num = seg_distr[k]; | 554 | regamma_params->arr_curve_points[k].segments_num = seg_distr[k]; |
555 | regamma_params->arr_curve_points[i].offset = | 555 | regamma_params->arr_curve_points[i].offset = |
556 | regamma_params->arr_curve_points[k].offset + (1 << seg_distr[k]); | 556 | regamma_params->arr_curve_points[k].offset + (1 << seg_distr[k]); |
557 | } | 557 | } |
558 | i++; | 558 | k++; |
559 | } | 559 | } |
560 | 560 | ||
561 | if (seg_distr[k] != -1) | 561 | if (seg_distr[k] != -1) |
@@ -1085,7 +1085,6 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, | |||
1085 | 1085 | ||
1086 | if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { | 1086 | if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { |
1087 | link->dc->hwss.edp_backlight_control(link, true); | 1087 | link->dc->hwss.edp_backlight_control(link, true); |
1088 | stream->bl_pwm_level = EDP_BACKLIGHT_RAMP_DISABLE_LEVEL; | ||
1089 | } | 1088 | } |
1090 | } | 1089 | } |
1091 | void dce110_blank_stream(struct pipe_ctx *pipe_ctx) | 1090 | void dce110_blank_stream(struct pipe_ctx *pipe_ctx) |
@@ -1192,8 +1191,8 @@ static void build_audio_output( | |||
1192 | if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT || | 1191 | if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT || |
1193 | pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { | 1192 | pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { |
1194 | audio_output->pll_info.dp_dto_source_clock_in_khz = | 1193 | audio_output->pll_info.dp_dto_source_clock_in_khz = |
1195 | state->dis_clk->funcs->get_dp_ref_clk_frequency( | 1194 | state->dccg->funcs->get_dp_ref_clk_frequency( |
1196 | state->dis_clk); | 1195 | state->dccg); |
1197 | } | 1196 | } |
1198 | 1197 | ||
1199 | audio_output->pll_info.feed_back_divider = | 1198 | audio_output->pll_info.feed_back_divider = |
@@ -1547,6 +1546,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) | |||
1547 | int i; | 1546 | int i; |
1548 | struct dc_link *edp_link_to_turnoff = NULL; | 1547 | struct dc_link *edp_link_to_turnoff = NULL; |
1549 | struct dc_link *edp_link = get_link_for_edp(dc); | 1548 | struct dc_link *edp_link = get_link_for_edp(dc); |
1549 | struct dc_bios *bios = dc->ctx->dc_bios; | ||
1550 | bool can_edp_fast_boot_optimize = false; | 1550 | bool can_edp_fast_boot_optimize = false; |
1551 | bool apply_edp_fast_boot_optimization = false; | 1551 | bool apply_edp_fast_boot_optimization = false; |
1552 | 1552 | ||
@@ -1573,6 +1573,20 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) | |||
1573 | if (context->streams[i]->signal == SIGNAL_TYPE_EDP) { | 1573 | if (context->streams[i]->signal == SIGNAL_TYPE_EDP) { |
1574 | context->streams[i]->apply_edp_fast_boot_optimization = true; | 1574 | context->streams[i]->apply_edp_fast_boot_optimization = true; |
1575 | apply_edp_fast_boot_optimization = true; | 1575 | apply_edp_fast_boot_optimization = true; |
1576 | |||
1577 | /* After S4/S5 the VBIOS may have posted eDP, so the previous | ||
1578 |  * dpms_off state no longer makes sense. | ||
1579 |  * Update the dpms_off state to align HW and SW state by checking | ||
1580 |  * the VBIOS scratch register. | ||
1581 |  */ | ||
1582 | if (bios->funcs->is_active_display) { | ||
1583 | const struct connector_device_tag_info *device_tag = &(edp_link->device_tag); | ||
1584 | |||
1585 | if (bios->funcs->is_active_display(bios, | ||
1586 | context->streams[i]->signal, | ||
1587 | device_tag)) | ||
1588 | context->streams[i]->dpms_off = false; | ||
1589 | } | ||
1576 | } | 1590 | } |
1577 | } | 1591 | } |
1578 | } | 1592 | } |
@@ -1736,41 +1750,18 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx, | |||
1736 | if (events->force_trigger) | 1750 | if (events->force_trigger) |
1737 | value |= 0x1; | 1751 | value |= 0x1; |
1738 | 1752 | ||
1739 | value |= 0x84; | 1753 | if (num_pipes) { |
1754 | struct dc *dc = pipe_ctx[0]->stream->ctx->dc; | ||
1755 | |||
1756 | if (dc->fbc_compressor) | ||
1757 | value |= 0x84; | ||
1758 | } | ||
1740 | 1759 | ||
1741 | for (i = 0; i < num_pipes; i++) | 1760 | for (i = 0; i < num_pipes; i++) |
1742 | pipe_ctx[i]->stream_res.tg->funcs-> | 1761 | pipe_ctx[i]->stream_res.tg->funcs-> |
1743 | set_static_screen_control(pipe_ctx[i]->stream_res.tg, value); | 1762 | set_static_screen_control(pipe_ctx[i]->stream_res.tg, value); |
1744 | } | 1763 | } |
1745 | 1764 | ||
1746 | /* unit: in_khz before mode set, get pixel clock from context. ASIC register | ||
1747 | * may not be programmed yet | ||
1748 | */ | ||
1749 | static uint32_t get_max_pixel_clock_for_all_paths( | ||
1750 | struct dc *dc, | ||
1751 | struct dc_state *context) | ||
1752 | { | ||
1753 | uint32_t max_pix_clk = 0; | ||
1754 | int i; | ||
1755 | |||
1756 | for (i = 0; i < MAX_PIPES; i++) { | ||
1757 | struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; | ||
1758 | |||
1759 | if (pipe_ctx->stream == NULL) | ||
1760 | continue; | ||
1761 | |||
1762 | /* do not check under lay */ | ||
1763 | if (pipe_ctx->top_pipe) | ||
1764 | continue; | ||
1765 | |||
1766 | if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk) | ||
1767 | max_pix_clk = | ||
1768 | pipe_ctx->stream_res.pix_clk_params.requested_pix_clk; | ||
1769 | } | ||
1770 | |||
1771 | return max_pix_clk; | ||
1772 | } | ||
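
For reference, the helper deleted above scanned every pipe, skipped empty pipes and underlays, and kept the largest requested pixel clock; its callers now pass the whole dc_state to update_clocks(), which can derive the same information. A standalone sketch of that scan over simplified stand-in types.

#include <stdint.h>
#include <stdio.h>

#define MAX_PIPES 6

struct fake_pipe {
	int has_stream;             /* stands in for pipe_ctx->stream != NULL */
	int is_underlay;            /* stands in for pipe_ctx->top_pipe != NULL */
	uint32_t requested_pix_clk; /* kHz */
};

static uint32_t max_pixel_clock(const struct fake_pipe pipes[MAX_PIPES])
{
	uint32_t max_pix_clk = 0;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (!pipes[i].has_stream)
			continue;
		if (pipes[i].is_underlay) /* do not count underlay pipes */
			continue;
		if (pipes[i].requested_pix_clk > max_pix_clk)
			max_pix_clk = pipes[i].requested_pix_clk;
	}

	return max_pix_clk;
}

int main(void)
{
	struct fake_pipe pipes[MAX_PIPES] = {
		{ 1, 0, 148500 }, { 1, 1, 594000 }, { 1, 0, 297000 },
	};

	printf("%u kHz\n", max_pixel_clock(pipes)); /* 297000: underlay skipped */
	return 0;
}
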
1773 | |||
1774 | /* | 1765 | /* |
1775 | * Check if FBC can be enabled | 1766 | * Check if FBC can be enabled |
1776 | */ | 1767 | */ |
@@ -2380,191 +2371,33 @@ static void init_hw(struct dc *dc) | |||
2380 | 2371 | ||
2381 | } | 2372 | } |
2382 | 2373 | ||
2383 | void dce110_fill_display_configs( | ||
2384 | const struct dc_state *context, | ||
2385 | struct dm_pp_display_configuration *pp_display_cfg) | ||
2386 | { | ||
2387 | int j; | ||
2388 | int num_cfgs = 0; | ||
2389 | |||
2390 | for (j = 0; j < context->stream_count; j++) { | ||
2391 | int k; | ||
2392 | |||
2393 | const struct dc_stream_state *stream = context->streams[j]; | ||
2394 | struct dm_pp_single_disp_config *cfg = | ||
2395 | &pp_display_cfg->disp_configs[num_cfgs]; | ||
2396 | const struct pipe_ctx *pipe_ctx = NULL; | ||
2397 | |||
2398 | for (k = 0; k < MAX_PIPES; k++) | ||
2399 | if (stream == context->res_ctx.pipe_ctx[k].stream) { | ||
2400 | pipe_ctx = &context->res_ctx.pipe_ctx[k]; | ||
2401 | break; | ||
2402 | } | ||
2403 | |||
2404 | ASSERT(pipe_ctx != NULL); | ||
2405 | |||
2406 | /* only notify active stream */ | ||
2407 | if (stream->dpms_off) | ||
2408 | continue; | ||
2409 | |||
2410 | num_cfgs++; | ||
2411 | cfg->signal = pipe_ctx->stream->signal; | ||
2412 | cfg->pipe_idx = pipe_ctx->stream_res.tg->inst; | ||
2413 | cfg->src_height = stream->src.height; | ||
2414 | cfg->src_width = stream->src.width; | ||
2415 | cfg->ddi_channel_mapping = | ||
2416 | stream->sink->link->ddi_channel_mapping.raw; | ||
2417 | cfg->transmitter = | ||
2418 | stream->sink->link->link_enc->transmitter; | ||
2419 | cfg->link_settings.lane_count = | ||
2420 | stream->sink->link->cur_link_settings.lane_count; | ||
2421 | cfg->link_settings.link_rate = | ||
2422 | stream->sink->link->cur_link_settings.link_rate; | ||
2423 | cfg->link_settings.link_spread = | ||
2424 | stream->sink->link->cur_link_settings.link_spread; | ||
2425 | cfg->sym_clock = stream->phy_pix_clk; | ||
2426 | /* Round v_refresh*/ | ||
2427 | cfg->v_refresh = stream->timing.pix_clk_khz * 1000; | ||
2428 | cfg->v_refresh /= stream->timing.h_total; | ||
2429 | cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2) | ||
2430 | / stream->timing.v_total; | ||
2431 | } | ||
2432 | |||
2433 | pp_display_cfg->display_count = num_cfgs; | ||
2434 | } | ||
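
The deleted dce110_fill_display_configs rounded the refresh rate to the nearest hertz as round(pix_clk_hz / (h_total * v_total)), using the add-half-the-divisor idiom for integer rounding. A worked example with a common 1080p60 timing; the values are illustrative only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pix_clk_khz = 148500, h_total = 2200, v_total = 1125;

	uint32_t v_refresh = pix_clk_khz * 1000 / h_total; /* 67500 lines/s */

	/* add v_total/2 before dividing to round to nearest instead of down */
	v_refresh = (v_refresh + v_total / 2) / v_total;

	printf("%u Hz\n", v_refresh); /* prints 60 */
	return 0;
}
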
2435 | |||
2436 | uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context) | ||
2437 | { | ||
2438 | uint8_t j; | ||
2439 | uint32_t min_vertical_blank_time = -1; | ||
2440 | |||
2441 | for (j = 0; j < context->stream_count; j++) { | ||
2442 | struct dc_stream_state *stream = context->streams[j]; | ||
2443 | uint32_t vertical_blank_in_pixels = 0; | ||
2444 | uint32_t vertical_blank_time = 0; | ||
2445 | |||
2446 | vertical_blank_in_pixels = stream->timing.h_total * | ||
2447 | (stream->timing.v_total | ||
2448 | - stream->timing.v_addressable); | ||
2449 | |||
2450 | vertical_blank_time = vertical_blank_in_pixels | ||
2451 | * 1000 / stream->timing.pix_clk_khz; | ||
2452 | |||
2453 | if (min_vertical_blank_time > vertical_blank_time) | ||
2454 | min_vertical_blank_time = vertical_blank_time; | ||
2455 | } | ||
2456 | |||
2457 | return min_vertical_blank_time; | ||
2458 | } | ||
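
The deleted dce110_get_min_vblank_time_us converted blanking pixels to time with pixels * 1000 / pix_clk_khz: a kHz pixel clock is pixels per millisecond, so the result is in microseconds. A worked example for a single 1080p-style timing.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t h_total = 2200, v_total = 1125, v_addressable = 1080;
	uint32_t pix_clk_khz = 148500;

	uint32_t vblank_pixels = h_total * (v_total - v_addressable);

	/* pix_clk_khz is pixels per millisecond; x1000 yields microseconds */
	uint32_t vblank_time_us = vblank_pixels * 1000 / pix_clk_khz;

	printf("%u us\n", vblank_time_us); /* prints 666 */
	return 0;
}
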
2459 | |||
2460 | static int determine_sclk_from_bounding_box( | ||
2461 | const struct dc *dc, | ||
2462 | int required_sclk) | ||
2463 | { | ||
2464 | int i; | ||
2465 | 2374 | ||
2466 | /* | 2375 | void dce110_prepare_bandwidth( |
2467 | * Some asics do not give us sclk levels, so we just report the actual | 2376 | struct dc *dc, |
2468 | * required sclk | 2377 | struct dc_state *context) |
2469 | */ | ||
2470 | if (dc->sclk_lvls.num_levels == 0) | ||
2471 | return required_sclk; | ||
2472 | |||
2473 | for (i = 0; i < dc->sclk_lvls.num_levels; i++) { | ||
2474 | if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk) | ||
2475 | return dc->sclk_lvls.clocks_in_khz[i]; | ||
2476 | } | ||
2477 | /* | ||
2478 | * even maximum level could not satisfy requirement, this | ||
2479 | * is unexpected at this stage, should have been caught at | ||
2480 | * validation time | ||
2481 | */ | ||
2482 | ASSERT(0); | ||
2483 | return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1]; | ||
2484 | } | ||
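
The deleted bounding-box lookup picked the lowest sclk level satisfying the request, passed the request straight through when the ASIC exposes no levels, and treated running past the top level as a validation-time failure. A standalone sketch of that policy.

#include <assert.h>
#include <stdio.h>

static int sclk_from_levels(const int *levels, int num_levels, int required)
{
	int i;

	if (num_levels == 0)
		return required; /* some ASICs expose no sclk levels */

	for (i = 0; i < num_levels; i++)
		if (levels[i] >= required)
			return levels[i];

	/* should have been caught at validation time */
	assert(0);
	return levels[num_levels - 1];
}

int main(void)
{
	int levels[] = { 300000, 600000, 800000 };

	printf("%d\n", sclk_from_levels(levels, 3, 500000)); /* 600000 */
	printf("%d\n", sclk_from_levels(levels, 0, 500000)); /* 500000 */
	return 0;
}
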
2485 | |||
2486 | static void pplib_apply_display_requirements( | ||
2487 | struct dc *dc, | ||
2488 | struct dc_state *context) | ||
2489 | { | 2378 | { |
2490 | struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; | 2379 | struct clk_mgr *dccg = dc->res_pool->clk_mgr; |
2491 | 2380 | ||
2492 | pp_display_cfg->all_displays_in_sync = | 2381 | dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool); |
2493 | context->bw.dce.all_displays_in_sync; | ||
2494 | pp_display_cfg->nb_pstate_switch_disable = | ||
2495 | context->bw.dce.nbp_state_change_enable == false; | ||
2496 | pp_display_cfg->cpu_cc6_disable = | ||
2497 | context->bw.dce.cpuc_state_change_enable == false; | ||
2498 | pp_display_cfg->cpu_pstate_disable = | ||
2499 | context->bw.dce.cpup_state_change_enable == false; | ||
2500 | pp_display_cfg->cpu_pstate_separation_time = | ||
2501 | context->bw.dce.blackout_recovery_time_us; | ||
2502 | 2382 | ||
2503 | pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz | 2383 | dccg->funcs->update_clocks( |
2504 | / MEMORY_TYPE_MULTIPLIER; | 2384 | dccg, |
2505 | 2385 | context, | |
2506 | pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box( | 2386 | false); |
2507 | dc, | ||
2508 | context->bw.dce.sclk_khz); | ||
2509 | |||
2510 | pp_display_cfg->min_engine_clock_deep_sleep_khz | ||
2511 | = context->bw.dce.sclk_deep_sleep_khz; | ||
2512 | |||
2513 | pp_display_cfg->avail_mclk_switch_time_us = | ||
2514 | dce110_get_min_vblank_time_us(context); | ||
2515 | /* TODO: dce11.2*/ | ||
2516 | pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; | ||
2517 | |||
2518 | pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz; | ||
2519 | |||
2520 | dce110_fill_display_configs(context, pp_display_cfg); | ||
2521 | |||
2522 | /* TODO: is this still applicable?*/ | ||
2523 | if (pp_display_cfg->display_count == 1) { | ||
2524 | const struct dc_crtc_timing *timing = | ||
2525 | &context->streams[0]->timing; | ||
2526 | |||
2527 | pp_display_cfg->crtc_index = | ||
2528 | pp_display_cfg->disp_configs[0].pipe_idx; | ||
2529 | pp_display_cfg->line_time_in_us = timing->h_total * 1000 | ||
2530 | / timing->pix_clk_khz; | ||
2531 | } | ||
2532 | |||
2533 | if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof( | ||
2534 | struct dm_pp_display_configuration)) != 0) | ||
2535 | dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); | ||
2536 | |||
2537 | dc->prev_display_config = *pp_display_cfg; | ||
2538 | } | 2387 | } |
2539 | 2388 | ||
2540 | static void dce110_set_bandwidth( | 2389 | void dce110_optimize_bandwidth( |
2541 | struct dc *dc, | 2390 | struct dc *dc, |
2542 | struct dc_state *context, | 2391 | struct dc_state *context) |
2543 | bool decrease_allowed) | ||
2544 | { | 2392 | { |
2545 | struct dc_clocks req_clks; | 2393 | struct clk_mgr *dccg = dc->res_pool->clk_mgr; |
2546 | struct dccg *dccg = dc->res_pool->dccg; | ||
2547 | |||
2548 | req_clks.dispclk_khz = context->bw.dce.dispclk_khz; | ||
2549 | req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context); | ||
2550 | |||
2551 | if (decrease_allowed) | ||
2552 | dce110_set_displaymarks(dc, context); | ||
2553 | else | ||
2554 | dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool); | ||
2555 | 2394 | ||
2556 | if (dccg->funcs->update_dfs_bypass) | 2395 | dce110_set_displaymarks(dc, context); |
2557 | dccg->funcs->update_dfs_bypass( | ||
2558 | dccg, | ||
2559 | dc, | ||
2560 | context, | ||
2561 | req_clks.dispclk_khz); | ||
2562 | 2396 | ||
2563 | dccg->funcs->update_clocks( | 2397 | dccg->funcs->update_clocks( |
2564 | dccg, | 2398 | dccg, |
2565 | &req_clks, | 2399 | context, |
2566 | decrease_allowed); | 2400 | true); |
2567 | pplib_apply_display_requirements(dc, context); | ||
2568 | } | 2401 | } |
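
The single set_bandwidth(decrease_allowed) entry point becomes a prepare/optimize pair: safe watermarks plus update_clocks(..., false) before mode programming, real watermarks plus update_clocks(..., true) once the new state is live. A minimal sketch of the calling order; the function bodies are stand-ins, not the driver's code.

#include <stdbool.h>
#include <stdio.h>

static void update_clocks(bool safe_to_lower)
{
	printf("update_clocks(safe_to_lower=%d)\n", safe_to_lower);
}

static void prepare_bandwidth(void)
{
	printf("program safe display marks\n");
	update_clocks(false); /* clocks may only rise before programming */
}

static void optimize_bandwidth(void)
{
	printf("program real display marks\n");
	update_clocks(true); /* now clocks are allowed to drop */
}

int main(void)
{
	prepare_bandwidth();
	printf("... apply context / program front end ...\n");
	optimize_bandwidth();
	return 0;
}
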
2569 | 2402 | ||
2570 | static void dce110_program_front_end_for_pipe( | 2403 | static void dce110_program_front_end_for_pipe( |
@@ -2769,28 +2602,6 @@ static void dce110_wait_for_mpcc_disconnect( | |||
2769 | /* do nothing*/ | 2602 | /* do nothing*/ |
2770 | } | 2603 | } |
2771 | 2604 | ||
2772 | static void program_csc_matrix(struct pipe_ctx *pipe_ctx, | ||
2773 | enum dc_color_space colorspace, | ||
2774 | uint16_t *matrix) | ||
2775 | { | ||
2776 | int i; | ||
2777 | struct out_csc_color_matrix tbl_entry; | ||
2778 | |||
2779 | if (pipe_ctx->stream->csc_color_matrix.enable_adjustment | ||
2780 | == true) { | ||
2781 | enum dc_color_space color_space = | ||
2782 | pipe_ctx->stream->output_color_space; | ||
2783 | |||
2784 | //uint16_t matrix[12]; | ||
2785 | for (i = 0; i < 12; i++) | ||
2786 | tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i]; | ||
2787 | |||
2788 | tbl_entry.color_space = color_space; | ||
2789 | //tbl_entry.regval = matrix; | ||
2790 | pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.xfm, &tbl_entry); | ||
2791 | } | ||
2792 | } | ||
2793 | |||
2794 | void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx) | 2605 | void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx) |
2795 | { | 2606 | { |
2796 | struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; | 2607 | struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; |
@@ -2839,13 +2650,8 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx) | |||
2839 | pipe_ctx->plane_res.xfm, attributes); | 2650 | pipe_ctx->plane_res.xfm, attributes); |
2840 | } | 2651 | } |
2841 | 2652 | ||
2842 | static void ready_shared_resources(struct dc *dc, struct dc_state *context) {} | ||
2843 | |||
2844 | static void optimize_shared_resources(struct dc *dc) {} | ||
2845 | |||
2846 | static const struct hw_sequencer_funcs dce110_funcs = { | 2653 | static const struct hw_sequencer_funcs dce110_funcs = { |
2847 | .program_gamut_remap = program_gamut_remap, | 2654 | .program_gamut_remap = program_gamut_remap, |
2848 | .program_csc_matrix = program_csc_matrix, | ||
2849 | .init_hw = init_hw, | 2655 | .init_hw = init_hw, |
2850 | .apply_ctx_to_hw = dce110_apply_ctx_to_hw, | 2656 | .apply_ctx_to_hw = dce110_apply_ctx_to_hw, |
2851 | .apply_ctx_for_surface = dce110_apply_ctx_for_surface, | 2657 | .apply_ctx_for_surface = dce110_apply_ctx_for_surface, |
@@ -2868,7 +2674,8 @@ static const struct hw_sequencer_funcs dce110_funcs = { | |||
2868 | .enable_display_power_gating = dce110_enable_display_power_gating, | 2674 | .enable_display_power_gating = dce110_enable_display_power_gating, |
2869 | .disable_plane = dce110_power_down_fe, | 2675 | .disable_plane = dce110_power_down_fe, |
2870 | .pipe_control_lock = dce_pipe_control_lock, | 2676 | .pipe_control_lock = dce_pipe_control_lock, |
2871 | .set_bandwidth = dce110_set_bandwidth, | 2677 | .prepare_bandwidth = dce110_prepare_bandwidth, |
2678 | .optimize_bandwidth = dce110_optimize_bandwidth, | ||
2872 | .set_drr = set_drr, | 2679 | .set_drr = set_drr, |
2873 | .get_position = get_position, | 2680 | .get_position = get_position, |
2874 | .set_static_screen_control = set_static_screen_control, | 2681 | .set_static_screen_control = set_static_screen_control, |
@@ -2877,9 +2684,6 @@ static const struct hw_sequencer_funcs dce110_funcs = { | |||
2877 | .setup_stereo = NULL, | 2684 | .setup_stereo = NULL, |
2878 | .set_avmute = dce110_set_avmute, | 2685 | .set_avmute = dce110_set_avmute, |
2879 | .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect, | 2686 | .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect, |
2880 | .ready_shared_resources = ready_shared_resources, | ||
2881 | .optimize_shared_resources = optimize_shared_resources, | ||
2882 | .pplib_apply_display_requirements = pplib_apply_display_requirements, | ||
2883 | .edp_backlight_control = hwss_edp_backlight_control, | 2687 | .edp_backlight_control = hwss_edp_backlight_control, |
2884 | .edp_power_control = hwss_edp_power_control, | 2688 | .edp_power_control = hwss_edp_power_control, |
2885 | .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, | 2689 | .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, |
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h index d6db3dbd9015..cd3e36d52a52 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h | |||
@@ -40,7 +40,6 @@ enum dc_status dce110_apply_ctx_to_hw( | |||
40 | struct dc_state *context); | 40 | struct dc_state *context); |
41 | 41 | ||
42 | 42 | ||
43 | |||
44 | void dce110_enable_stream(struct pipe_ctx *pipe_ctx); | 43 | void dce110_enable_stream(struct pipe_ctx *pipe_ctx); |
45 | 44 | ||
46 | void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option); | 45 | void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option); |
@@ -64,11 +63,13 @@ void dce110_set_safe_displaymarks( | |||
64 | struct resource_context *res_ctx, | 63 | struct resource_context *res_ctx, |
65 | const struct resource_pool *pool); | 64 | const struct resource_pool *pool); |
66 | 65 | ||
67 | void dce110_fill_display_configs( | 66 | void dce110_prepare_bandwidth( |
68 | const struct dc_state *context, | 67 | struct dc *dc, |
69 | struct dm_pp_display_configuration *pp_display_cfg); | 68 | struct dc_state *context); |
70 | 69 | ||
71 | uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context); | 70 | void dce110_optimize_bandwidth( |
71 | struct dc *dc, | ||
72 | struct dc_state *context); | ||
72 | 73 | ||
73 | void dp_receiver_power_ctrl(struct dc_link *link, bool on); | 74 | void dp_receiver_power_ctrl(struct dc_link *link, bool on); |
74 | 75 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index e3624ca24574..e33d11785b1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "resource.h" | 31 | #include "resource.h" |
32 | #include "dce110/dce110_resource.h" | 32 | #include "dce110/dce110_resource.h" |
33 | 33 | ||
34 | #include "dce/dce_clk_mgr.h" | ||
34 | #include "include/irq_service_interface.h" | 35 | #include "include/irq_service_interface.h" |
35 | #include "dce/dce_audio.h" | 36 | #include "dce/dce_audio.h" |
36 | #include "dce110/dce110_timing_generator.h" | 37 | #include "dce110/dce110_timing_generator.h" |
@@ -45,7 +46,6 @@ | |||
45 | #include "dce110/dce110_transform_v.h" | 46 | #include "dce110/dce110_transform_v.h" |
46 | #include "dce/dce_opp.h" | 47 | #include "dce/dce_opp.h" |
47 | #include "dce110/dce110_opp_v.h" | 48 | #include "dce110/dce110_opp_v.h" |
48 | #include "dce/dce_clocks.h" | ||
49 | #include "dce/dce_clock_source.h" | 49 | #include "dce/dce_clock_source.h" |
50 | #include "dce/dce_hwseq.h" | 50 | #include "dce/dce_hwseq.h" |
51 | #include "dce110/dce110_hw_sequencer.h" | 51 | #include "dce110/dce110_hw_sequencer.h" |
@@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = { | |||
148 | #define SRI(reg_name, block, id)\ | 148 | #define SRI(reg_name, block, id)\ |
149 | .reg_name = mm ## block ## id ## _ ## reg_name | 149 | .reg_name = mm ## block ## id ## _ ## reg_name |
150 | 150 | ||
151 | static const struct dccg_registers disp_clk_regs = { | 151 | static const struct clk_mgr_registers disp_clk_regs = { |
152 | CLK_COMMON_REG_LIST_DCE_BASE() | 152 | CLK_COMMON_REG_LIST_DCE_BASE() |
153 | }; | 153 | }; |
154 | 154 | ||
155 | static const struct dccg_shift disp_clk_shift = { | 155 | static const struct clk_mgr_shift disp_clk_shift = { |
156 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) | 156 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) |
157 | }; | 157 | }; |
158 | 158 | ||
159 | static const struct dccg_mask disp_clk_mask = { | 159 | static const struct clk_mgr_mask disp_clk_mask = { |
160 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) | 160 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) |
161 | }; | 161 | }; |
162 | 162 | ||
@@ -760,8 +760,8 @@ static void destruct(struct dce110_resource_pool *pool) | |||
760 | if (pool->base.dmcu != NULL) | 760 | if (pool->base.dmcu != NULL) |
761 | dce_dmcu_destroy(&pool->base.dmcu); | 761 | dce_dmcu_destroy(&pool->base.dmcu); |
762 | 762 | ||
763 | if (pool->base.dccg != NULL) | 763 | if (pool->base.clk_mgr != NULL) |
764 | dce_dccg_destroy(&pool->base.dccg); | 764 | dce_clk_mgr_destroy(&pool->base.clk_mgr); |
765 | 765 | ||
766 | if (pool->base.irqs != NULL) { | 766 | if (pool->base.irqs != NULL) { |
767 | dal_irq_service_destroy(&pool->base.irqs); | 767 | dal_irq_service_destroy(&pool->base.irqs); |
@@ -1173,12 +1173,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) | |||
1173 | &clks); | 1173 | &clks); |
1174 | 1174 | ||
1175 | dc->bw_vbios->low_yclk = bw_frc_to_fixed( | 1175 | dc->bw_vbios->low_yclk = bw_frc_to_fixed( |
1176 | clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000); | 1176 | clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); |
1177 | dc->bw_vbios->mid_yclk = bw_frc_to_fixed( | 1177 | dc->bw_vbios->mid_yclk = bw_frc_to_fixed( |
1178 | clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER, | 1178 | clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ, |
1179 | 1000); | 1179 | 1000); |
1180 | dc->bw_vbios->high_yclk = bw_frc_to_fixed( | 1180 | dc->bw_vbios->high_yclk = bw_frc_to_fixed( |
1181 | clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER, | 1181 | clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ, |
1182 | 1000); | 1182 | 1000); |
1183 | } | 1183 | } |
1184 | 1184 | ||
@@ -1201,7 +1201,6 @@ static bool construct( | |||
1201 | struct dc_context *ctx = dc->ctx; | 1201 | struct dc_context *ctx = dc->ctx; |
1202 | struct dc_firmware_info info; | 1202 | struct dc_firmware_info info; |
1203 | struct dc_bios *bp; | 1203 | struct dc_bios *bp; |
1204 | struct dm_pp_static_clock_info static_clk_info = {0}; | ||
1205 | 1204 | ||
1206 | ctx->dc_bios->regs = &bios_regs; | 1205 | ctx->dc_bios->regs = &bios_regs; |
1207 | 1206 | ||
@@ -1257,11 +1256,11 @@ static bool construct( | |||
1257 | } | 1256 | } |
1258 | } | 1257 | } |
1259 | 1258 | ||
1260 | pool->base.dccg = dce110_dccg_create(ctx, | 1259 | pool->base.clk_mgr = dce110_clk_mgr_create(ctx, |
1261 | &disp_clk_regs, | 1260 | &disp_clk_regs, |
1262 | &disp_clk_shift, | 1261 | &disp_clk_shift, |
1263 | &disp_clk_mask); | 1262 | &disp_clk_mask); |
1264 | if (pool->base.dccg == NULL) { | 1263 | if (pool->base.clk_mgr == NULL) { |
1265 | dm_error("DC: failed to create display clock!\n"); | 1264 | dm_error("DC: failed to create display clock!\n"); |
1266 | BREAK_TO_DEBUGGER(); | 1265 | BREAK_TO_DEBUGGER(); |
1267 | goto res_create_fail; | 1266 | goto res_create_fail; |
@@ -1287,13 +1286,6 @@ static bool construct( | |||
1287 | goto res_create_fail; | 1286 | goto res_create_fail; |
1288 | } | 1287 | } |
1289 | 1288 | ||
1290 | /* get static clock information for PPLIB or firmware, save | ||
1291 | * max_clock_state | ||
1292 | */ | ||
1293 | if (dm_pp_get_static_clocks(ctx, &static_clk_info)) | ||
1294 | pool->base.dccg->max_clks_state = | ||
1295 | static_clk_info.max_clocks_state; | ||
1296 | |||
1297 | { | 1289 | { |
1298 | struct irq_service_init_data init_data; | 1290 | struct irq_service_init_data init_data; |
1299 | init_data.ctx = dc->ctx; | 1291 | init_data.ctx = dc->ctx; |
@@ -1362,7 +1354,8 @@ static bool construct( | |||
1362 | pool->base.sw_i2cs[i] = NULL; | 1354 | pool->base.sw_i2cs[i] = NULL; |
1363 | } | 1355 | } |
1364 | 1356 | ||
1365 | dc->fbc_compressor = dce110_compressor_create(ctx); | 1357 | if (dc->config.fbc_support) |
1358 | dc->fbc_compressor = dce110_compressor_create(ctx); | ||
1366 | 1359 | ||
1367 | if (!underlay_create(ctx, &pool->base)) | 1360 | if (!underlay_create(ctx, &pool->base)) |
1368 | goto res_create_fail; | 1361 | goto res_create_fail; |
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 3ce79c208ddf..969d4e72dc94 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | |||
@@ -35,6 +35,7 @@ | |||
35 | 35 | ||
36 | #include "irq/dce110/irq_service_dce110.h" | 36 | #include "irq/dce110/irq_service_dce110.h" |
37 | 37 | ||
38 | #include "dce/dce_clk_mgr.h" | ||
38 | #include "dce/dce_mem_input.h" | 39 | #include "dce/dce_mem_input.h" |
39 | #include "dce/dce_transform.h" | 40 | #include "dce/dce_transform.h" |
40 | #include "dce/dce_link_encoder.h" | 41 | #include "dce/dce_link_encoder.h" |
@@ -42,7 +43,6 @@ | |||
42 | #include "dce/dce_audio.h" | 43 | #include "dce/dce_audio.h" |
43 | #include "dce/dce_opp.h" | 44 | #include "dce/dce_opp.h" |
44 | #include "dce/dce_ipp.h" | 45 | #include "dce/dce_ipp.h" |
45 | #include "dce/dce_clocks.h" | ||
46 | #include "dce/dce_clock_source.h" | 46 | #include "dce/dce_clock_source.h" |
47 | 47 | ||
48 | #include "dce/dce_hwseq.h" | 48 | #include "dce/dce_hwseq.h" |
@@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = { | |||
148 | .reg_name = mm ## block ## id ## _ ## reg_name | 148 | .reg_name = mm ## block ## id ## _ ## reg_name |
149 | 149 | ||
150 | 150 | ||
151 | static const struct dccg_registers disp_clk_regs = { | 151 | static const struct clk_mgr_registers disp_clk_regs = { |
152 | CLK_COMMON_REG_LIST_DCE_BASE() | 152 | CLK_COMMON_REG_LIST_DCE_BASE() |
153 | }; | 153 | }; |
154 | 154 | ||
155 | static const struct dccg_shift disp_clk_shift = { | 155 | static const struct clk_mgr_shift disp_clk_shift = { |
156 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) | 156 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) |
157 | }; | 157 | }; |
158 | 158 | ||
159 | static const struct dccg_mask disp_clk_mask = { | 159 | static const struct clk_mgr_mask disp_clk_mask = { |
160 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) | 160 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) |
161 | }; | 161 | }; |
162 | 162 | ||
@@ -551,7 +551,8 @@ static struct transform *dce112_transform_create( | |||
551 | static const struct encoder_feature_support link_enc_feature = { | 551 | static const struct encoder_feature_support link_enc_feature = { |
552 | .max_hdmi_deep_color = COLOR_DEPTH_121212, | 552 | .max_hdmi_deep_color = COLOR_DEPTH_121212, |
553 | .max_hdmi_pixel_clock = 600000, | 553 | .max_hdmi_pixel_clock = 600000, |
554 | .ycbcr420_supported = true, | 554 | .hdmi_ycbcr420_supported = true, |
555 | .dp_ycbcr420_supported = false, | ||
555 | .flags.bits.IS_HBR2_CAPABLE = true, | 556 | .flags.bits.IS_HBR2_CAPABLE = true, |
556 | .flags.bits.IS_HBR3_CAPABLE = true, | 557 | .flags.bits.IS_HBR3_CAPABLE = true, |
557 | .flags.bits.IS_TPS3_CAPABLE = true, | 558 | .flags.bits.IS_TPS3_CAPABLE = true, |
@@ -749,8 +750,8 @@ static void destruct(struct dce110_resource_pool *pool) | |||
749 | if (pool->base.dmcu != NULL) | 750 | if (pool->base.dmcu != NULL) |
750 | dce_dmcu_destroy(&pool->base.dmcu); | 751 | dce_dmcu_destroy(&pool->base.dmcu); |
751 | 752 | ||
752 | if (pool->base.dccg != NULL) | 753 | if (pool->base.clk_mgr != NULL) |
753 | dce_dccg_destroy(&pool->base.dccg); | 754 | dce_clk_mgr_destroy(&pool->base.clk_mgr); |
754 | 755 | ||
755 | if (pool->base.irqs != NULL) { | 756 | if (pool->base.irqs != NULL) { |
756 | dal_irq_service_destroy(&pool->base.irqs); | 757 | dal_irq_service_destroy(&pool->base.irqs); |
@@ -1015,12 +1016,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) | |||
1015 | &clks); | 1016 | &clks); |
1016 | 1017 | ||
1017 | dc->bw_vbios->low_yclk = bw_frc_to_fixed( | 1018 | dc->bw_vbios->low_yclk = bw_frc_to_fixed( |
1018 | clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000); | 1019 | clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000); |
1019 | dc->bw_vbios->mid_yclk = bw_frc_to_fixed( | 1020 | dc->bw_vbios->mid_yclk = bw_frc_to_fixed( |
1020 | clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER, | 1021 | clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ, |
1021 | 1000); | 1022 | 1000); |
1022 | dc->bw_vbios->high_yclk = bw_frc_to_fixed( | 1023 | dc->bw_vbios->high_yclk = bw_frc_to_fixed( |
1023 | clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER, | 1024 | clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ, |
1024 | 1000); | 1025 | 1000); |
1025 | 1026 | ||
1026 | return; | 1027 | return; |
@@ -1056,12 +1057,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) | |||
1056 | * YCLK = UMACLK*m_memoryTypeMultiplier | 1057 | * YCLK = UMACLK*m_memoryTypeMultiplier |
1057 | */ | 1058 | */ |
1058 | dc->bw_vbios->low_yclk = bw_frc_to_fixed( | 1059 | dc->bw_vbios->low_yclk = bw_frc_to_fixed( |
1059 | mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000); | 1060 | mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000); |
1060 | dc->bw_vbios->mid_yclk = bw_frc_to_fixed( | 1061 | dc->bw_vbios->mid_yclk = bw_frc_to_fixed( |
1061 | mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, | 1062 | mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, |
1062 | 1000); | 1063 | 1000); |
1063 | dc->bw_vbios->high_yclk = bw_frc_to_fixed( | 1064 | dc->bw_vbios->high_yclk = bw_frc_to_fixed( |
1064 | mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, | 1065 | mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, |
1065 | 1000); | 1066 | 1000); |
1066 | 1067 | ||
1067 | /* Now notify PPLib/SMU about which Watermarks sets they should select | 1068 | /* Now notify PPLib/SMU about which Watermarks sets they should select |
@@ -1131,7 +1132,6 @@ static bool construct( | |||
1131 | { | 1132 | { |
1132 | unsigned int i; | 1133 | unsigned int i; |
1133 | struct dc_context *ctx = dc->ctx; | 1134 | struct dc_context *ctx = dc->ctx; |
1134 | struct dm_pp_static_clock_info static_clk_info = {0}; | ||
1135 | 1135 | ||
1136 | ctx->dc_bios->regs = &bios_regs; | 1136 | ctx->dc_bios->regs = &bios_regs; |
1137 | 1137 | ||
@@ -1199,11 +1199,11 @@ static bool construct( | |||
1199 | } | 1199 | } |
1200 | } | 1200 | } |
1201 | 1201 | ||
1202 | pool->base.dccg = dce112_dccg_create(ctx, | 1202 | pool->base.clk_mgr = dce112_clk_mgr_create(ctx, |
1203 | &disp_clk_regs, | 1203 | &disp_clk_regs, |
1204 | &disp_clk_shift, | 1204 | &disp_clk_shift, |
1205 | &disp_clk_mask); | 1205 | &disp_clk_mask); |
1206 | if (pool->base.dccg == NULL) { | 1206 | if (pool->base.clk_mgr == NULL) { |
1207 | dm_error("DC: failed to create display clock!\n"); | 1207 | dm_error("DC: failed to create display clock!\n"); |
1208 | BREAK_TO_DEBUGGER(); | 1208 | BREAK_TO_DEBUGGER(); |
1209 | goto res_create_fail; | 1209 | goto res_create_fail; |
@@ -1229,13 +1229,6 @@ static bool construct( | |||
1229 | goto res_create_fail; | 1229 | goto res_create_fail; |
1230 | } | 1230 | } |
1231 | 1231 | ||
1232 | /* get static clock information for PPLIB or firmware, save | ||
1233 | * max_clock_state | ||
1234 | */ | ||
1235 | if (dm_pp_get_static_clocks(ctx, &static_clk_info)) | ||
1236 | pool->base.dccg->max_clks_state = | ||
1237 | static_clk_info.max_clocks_state; | ||
1238 | |||
1239 | { | 1232 | { |
1240 | struct irq_service_init_data init_data; | 1233 | struct irq_service_init_data init_data; |
1241 | init_data.ctx = dc->ctx; | 1234 | init_data.ctx = dc->ctx; |
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 79ab5f9f9115..f12696674eb0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "resource.h" | 31 | #include "resource.h" |
32 | #include "include/irq_service_interface.h" | 32 | #include "include/irq_service_interface.h" |
33 | #include "dce120_resource.h" | 33 | #include "dce120_resource.h" |
34 | |||
34 | #include "dce112/dce112_resource.h" | 35 | #include "dce112/dce112_resource.h" |
35 | 36 | ||
36 | #include "dce110/dce110_resource.h" | 37 | #include "dce110/dce110_resource.h" |
@@ -39,7 +40,6 @@ | |||
39 | #include "irq/dce120/irq_service_dce120.h" | 40 | #include "irq/dce120/irq_service_dce120.h" |
40 | #include "dce/dce_opp.h" | 41 | #include "dce/dce_opp.h" |
41 | #include "dce/dce_clock_source.h" | 42 | #include "dce/dce_clock_source.h" |
42 | #include "dce/dce_clocks.h" | ||
43 | #include "dce/dce_ipp.h" | 43 | #include "dce/dce_ipp.h" |
44 | #include "dce/dce_mem_input.h" | 44 | #include "dce/dce_mem_input.h" |
45 | 45 | ||
@@ -47,6 +47,7 @@ | |||
47 | #include "dce120/dce120_hw_sequencer.h" | 47 | #include "dce120/dce120_hw_sequencer.h" |
48 | #include "dce/dce_transform.h" | 48 | #include "dce/dce_transform.h" |
49 | 49 | ||
50 | #include "dce/dce_clk_mgr.h" | ||
50 | #include "dce/dce_audio.h" | 51 | #include "dce/dce_audio.h" |
51 | #include "dce/dce_link_encoder.h" | 52 | #include "dce/dce_link_encoder.h" |
52 | #include "dce/dce_stream_encoder.h" | 53 | #include "dce/dce_stream_encoder.h" |
@@ -573,8 +574,8 @@ static void destruct(struct dce110_resource_pool *pool) | |||
573 | if (pool->base.dmcu != NULL) | 574 | if (pool->base.dmcu != NULL) |
574 | dce_dmcu_destroy(&pool->base.dmcu); | 575 | dce_dmcu_destroy(&pool->base.dmcu); |
575 | 576 | ||
576 | if (pool->base.dccg != NULL) | 577 | if (pool->base.clk_mgr != NULL) |
577 | dce_dccg_destroy(&pool->base.dccg); | 578 | dce_clk_mgr_destroy(&pool->base.clk_mgr); |
578 | } | 579 | } |
579 | 580 | ||
580 | static void read_dce_straps( | 581 | static void read_dce_straps( |
@@ -606,7 +607,8 @@ static struct audio *create_audio( | |||
606 | static const struct encoder_feature_support link_enc_feature = { | 607 | static const struct encoder_feature_support link_enc_feature = { |
607 | .max_hdmi_deep_color = COLOR_DEPTH_121212, | 608 | .max_hdmi_deep_color = COLOR_DEPTH_121212, |
608 | .max_hdmi_pixel_clock = 600000, | 609 | .max_hdmi_pixel_clock = 600000, |
609 | .ycbcr420_supported = true, | 610 | .hdmi_ycbcr420_supported = true, |
611 | .dp_ycbcr420_supported = false, | ||
610 | .flags.bits.IS_HBR2_CAPABLE = true, | 612 | .flags.bits.IS_HBR2_CAPABLE = true, |
611 | .flags.bits.IS_HBR3_CAPABLE = true, | 613 | .flags.bits.IS_HBR3_CAPABLE = true, |
612 | .flags.bits.IS_TPS3_CAPABLE = true, | 614 | .flags.bits.IS_TPS3_CAPABLE = true, |
@@ -834,12 +836,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) | |||
834 | * YCLK = UMACLK*m_memoryTypeMultiplier | 836 | * YCLK = UMACLK*m_memoryTypeMultiplier |
835 | */ | 837 | */ |
836 | dc->bw_vbios->low_yclk = bw_frc_to_fixed( | 838 | dc->bw_vbios->low_yclk = bw_frc_to_fixed( |
837 | mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000); | 839 | mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000); |
838 | dc->bw_vbios->mid_yclk = bw_frc_to_fixed( | 840 | dc->bw_vbios->mid_yclk = bw_frc_to_fixed( |
839 | mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, | 841 | mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, |
840 | 1000); | 842 | 1000); |
841 | dc->bw_vbios->high_yclk = bw_frc_to_fixed( | 843 | dc->bw_vbios->high_yclk = bw_frc_to_fixed( |
842 | mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, | 844 | mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, |
843 | 1000); | 845 | 1000); |
844 | 846 | ||
845 | /* Now notify PPLib/SMU about which Watermarks sets they should select | 847 | /* Now notify PPLib/SMU about which Watermarks sets they should select |
@@ -973,8 +975,8 @@ static bool construct( | |||
973 | } | 975 | } |
974 | } | 976 | } |
975 | 977 | ||
976 | pool->base.dccg = dce120_dccg_create(ctx); | 978 | pool->base.clk_mgr = dce120_clk_mgr_create(ctx); |
977 | if (pool->base.dccg == NULL) { | 979 | if (pool->base.clk_mgr == NULL) { |
978 | dm_error("DC: failed to create display clock!\n"); | 980 | dm_error("DC: failed to create display clock!\n"); |
979 | BREAK_TO_DEBUGGER(); | 981 | BREAK_TO_DEBUGGER(); |
980 | goto dccg_create_fail; | 982 | goto dccg_create_fail; |
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c index 6c6a1a16af19..a60a90e68d91 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c | |||
@@ -76,6 +76,7 @@ void dce80_hw_sequencer_construct(struct dc *dc) | |||
76 | 76 | ||
77 | dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; | 77 | dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; |
78 | dc->hwss.pipe_control_lock = dce_pipe_control_lock; | 78 | dc->hwss.pipe_control_lock = dce_pipe_control_lock; |
79 | dc->hwss.set_bandwidth = dce100_set_bandwidth; | 79 | dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; |
80 | dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth; | ||
80 | } | 81 | } |
81 | 82 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index d68f951f9869..6d40b3d54ac1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include "dce110/dce110_timing_generator.h" | 37 | #include "dce110/dce110_timing_generator.h" |
38 | #include "dce110/dce110_resource.h" | 38 | #include "dce110/dce110_resource.h" |
39 | #include "dce80/dce80_timing_generator.h" | 39 | #include "dce80/dce80_timing_generator.h" |
40 | #include "dce/dce_clk_mgr.h" | ||
40 | #include "dce/dce_mem_input.h" | 41 | #include "dce/dce_mem_input.h" |
41 | #include "dce/dce_link_encoder.h" | 42 | #include "dce/dce_link_encoder.h" |
42 | #include "dce/dce_stream_encoder.h" | 43 | #include "dce/dce_stream_encoder.h" |
@@ -44,7 +45,6 @@ | |||
44 | #include "dce/dce_ipp.h" | 45 | #include "dce/dce_ipp.h" |
45 | #include "dce/dce_transform.h" | 46 | #include "dce/dce_transform.h" |
46 | #include "dce/dce_opp.h" | 47 | #include "dce/dce_opp.h" |
47 | #include "dce/dce_clocks.h" | ||
48 | #include "dce/dce_clock_source.h" | 48 | #include "dce/dce_clock_source.h" |
49 | #include "dce/dce_audio.h" | 49 | #include "dce/dce_audio.h" |
50 | #include "dce/dce_hwseq.h" | 50 | #include "dce/dce_hwseq.h" |
@@ -155,15 +155,15 @@ static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = { | |||
155 | .reg_name = mm ## block ## id ## _ ## reg_name | 155 | .reg_name = mm ## block ## id ## _ ## reg_name |
156 | 156 | ||
157 | 157 | ||
158 | static const struct dccg_registers disp_clk_regs = { | 158 | static const struct clk_mgr_registers disp_clk_regs = { |
159 | CLK_COMMON_REG_LIST_DCE_BASE() | 159 | CLK_COMMON_REG_LIST_DCE_BASE() |
160 | }; | 160 | }; |
161 | 161 | ||
162 | static const struct dccg_shift disp_clk_shift = { | 162 | static const struct clk_mgr_shift disp_clk_shift = { |
163 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) | 163 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT) |
164 | }; | 164 | }; |
165 | 165 | ||
166 | static const struct dccg_mask disp_clk_mask = { | 166 | static const struct clk_mgr_mask disp_clk_mask = { |
167 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) | 167 | CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK) |
168 | }; | 168 | }; |
169 | 169 | ||
@@ -779,8 +779,8 @@ static void destruct(struct dce110_resource_pool *pool) | |||
779 | } | 779 | } |
780 | } | 780 | } |
781 | 781 | ||
782 | if (pool->base.dccg != NULL) | 782 | if (pool->base.clk_mgr != NULL) |
783 | dce_dccg_destroy(&pool->base.dccg); | 783 | dce_clk_mgr_destroy(&pool->base.clk_mgr); |
784 | 784 | ||
785 | if (pool->base.irqs != NULL) { | 785 | if (pool->base.irqs != NULL) { |
786 | dal_irq_service_destroy(&pool->base.irqs); | 786 | dal_irq_service_destroy(&pool->base.irqs); |
@@ -793,7 +793,7 @@ bool dce80_validate_bandwidth( | |||
793 | { | 793 | { |
794 | /* TODO implement when needed but for now hardcode max value*/ | 794 | /* TODO implement when needed but for now hardcode max value*/ |
795 | context->bw.dce.dispclk_khz = 681000; | 795 | context->bw.dce.dispclk_khz = 681000; |
796 | context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER; | 796 | context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; |
797 | 797 | ||
798 | return true; | 798 | return true; |
799 | } | 799 | } |
@@ -855,7 +855,6 @@ static bool dce80_construct( | |||
855 | struct dc_context *ctx = dc->ctx; | 855 | struct dc_context *ctx = dc->ctx; |
856 | struct dc_firmware_info info; | 856 | struct dc_firmware_info info; |
857 | struct dc_bios *bp; | 857 | struct dc_bios *bp; |
858 | struct dm_pp_static_clock_info static_clk_info = {0}; | ||
859 | 858 | ||
860 | ctx->dc_bios->regs = &bios_regs; | 859 | ctx->dc_bios->regs = &bios_regs; |
861 | 860 | ||
@@ -918,11 +917,11 @@ static bool dce80_construct( | |||
918 | } | 917 | } |
919 | } | 918 | } |
920 | 919 | ||
921 | pool->base.dccg = dce_dccg_create(ctx, | 920 | pool->base.clk_mgr = dce_clk_mgr_create(ctx, |
922 | &disp_clk_regs, | 921 | &disp_clk_regs, |
923 | &disp_clk_shift, | 922 | &disp_clk_shift, |
924 | &disp_clk_mask); | 923 | &disp_clk_mask); |
925 | if (pool->base.dccg == NULL) { | 924 | if (pool->base.clk_mgr == NULL) { |
926 | dm_error("DC: failed to create display clock!\n"); | 925 | dm_error("DC: failed to create display clock!\n"); |
927 | BREAK_TO_DEBUGGER(); | 926 | BREAK_TO_DEBUGGER(); |
928 | goto res_create_fail; | 927 | goto res_create_fail; |
@@ -948,10 +947,6 @@ static bool dce80_construct( | |||
948 | goto res_create_fail; | 947 | goto res_create_fail; |
949 | } | 948 | } |
950 | 949 | ||
951 | if (dm_pp_get_static_clocks(ctx, &static_clk_info)) | ||
952 | pool->base.dccg->max_clks_state = | ||
953 | static_clk_info.max_clocks_state; | ||
954 | |||
955 | { | 950 | { |
956 | struct irq_service_init_data init_data; | 951 | struct irq_service_init_data init_data; |
957 | init_data.ctx = dc->ctx; | 952 | init_data.ctx = dc->ctx; |
@@ -1065,7 +1060,6 @@ static bool dce81_construct( | |||
1065 | struct dc_context *ctx = dc->ctx; | 1060 | struct dc_context *ctx = dc->ctx; |
1066 | struct dc_firmware_info info; | 1061 | struct dc_firmware_info info; |
1067 | struct dc_bios *bp; | 1062 | struct dc_bios *bp; |
1068 | struct dm_pp_static_clock_info static_clk_info = {0}; | ||
1069 | 1063 | ||
1070 | ctx->dc_bios->regs = &bios_regs; | 1064 | ctx->dc_bios->regs = &bios_regs; |
1071 | 1065 | ||
@@ -1128,11 +1122,11 @@ static bool dce81_construct( | |||
1128 | } | 1122 | } |
1129 | } | 1123 | } |
1130 | 1124 | ||
1131 | pool->base.dccg = dce_dccg_create(ctx, | 1125 | pool->base.clk_mgr = dce_clk_mgr_create(ctx, |
1132 | &disp_clk_regs, | 1126 | &disp_clk_regs, |
1133 | &disp_clk_shift, | 1127 | &disp_clk_shift, |
1134 | &disp_clk_mask); | 1128 | &disp_clk_mask); |
1135 | if (pool->base.dccg == NULL) { | 1129 | if (pool->base.clk_mgr == NULL) { |
1136 | dm_error("DC: failed to create display clock!\n"); | 1130 | dm_error("DC: failed to create display clock!\n"); |
1137 | BREAK_TO_DEBUGGER(); | 1131 | BREAK_TO_DEBUGGER(); |
1138 | goto res_create_fail; | 1132 | goto res_create_fail; |
@@ -1158,10 +1152,6 @@ static bool dce81_construct( | |||
1158 | goto res_create_fail; | 1152 | goto res_create_fail; |
1159 | } | 1153 | } |
1160 | 1154 | ||
1161 | if (dm_pp_get_static_clocks(ctx, &static_clk_info)) | ||
1162 | pool->base.dccg->max_clks_state = | ||
1163 | static_clk_info.max_clocks_state; | ||
1164 | |||
1165 | { | 1155 | { |
1166 | struct irq_service_init_data init_data; | 1156 | struct irq_service_init_data init_data; |
1167 | init_data.ctx = dc->ctx; | 1157 | init_data.ctx = dc->ctx; |
@@ -1275,7 +1265,6 @@ static bool dce83_construct( | |||
1275 | struct dc_context *ctx = dc->ctx; | 1265 | struct dc_context *ctx = dc->ctx; |
1276 | struct dc_firmware_info info; | 1266 | struct dc_firmware_info info; |
1277 | struct dc_bios *bp; | 1267 | struct dc_bios *bp; |
1278 | struct dm_pp_static_clock_info static_clk_info = {0}; | ||
1279 | 1268 | ||
1280 | ctx->dc_bios->regs = &bios_regs; | 1269 | ctx->dc_bios->regs = &bios_regs; |
1281 | 1270 | ||
@@ -1334,11 +1323,11 @@ static bool dce83_construct( | |||
1334 | } | 1323 | } |
1335 | } | 1324 | } |
1336 | 1325 | ||
1337 | pool->base.dccg = dce_dccg_create(ctx, | 1326 | pool->base.clk_mgr = dce_clk_mgr_create(ctx, |
1338 | &disp_clk_regs, | 1327 | &disp_clk_regs, |
1339 | &disp_clk_shift, | 1328 | &disp_clk_shift, |
1340 | &disp_clk_mask); | 1329 | &disp_clk_mask); |
1341 | if (pool->base.dccg == NULL) { | 1330 | if (pool->base.clk_mgr == NULL) { |
1342 | dm_error("DC: failed to create display clock!\n"); | 1331 | dm_error("DC: failed to create display clock!\n"); |
1343 | BREAK_TO_DEBUGGER(); | 1332 | BREAK_TO_DEBUGGER(); |
1344 | goto res_create_fail; | 1333 | goto res_create_fail; |
@@ -1364,10 +1353,6 @@ static bool dce83_construct( | |||
1364 | goto res_create_fail; | 1353 | goto res_create_fail; |
1365 | } | 1354 | } |
1366 | 1355 | ||
1367 | if (dm_pp_get_static_clocks(ctx, &static_clk_info)) | ||
1368 | pool->base.dccg->max_clks_state = | ||
1369 | static_clk_info.max_clocks_state; | ||
1370 | |||
1371 | { | 1356 | { |
1372 | struct irq_service_init_data init_data; | 1357 | struct irq_service_init_data init_data; |
1373 | init_data.ctx = dc->ctx; | 1358 | init_data.ctx = dc->ctx; |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile index 032f872be89c..55f293c8a3c0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile | |||
@@ -24,7 +24,7 @@ | |||
24 | 24 | ||
25 | DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \ | 25 | DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \ |
26 | dcn10_dpp.o dcn10_opp.o dcn10_optc.o \ | 26 | dcn10_dpp.o dcn10_opp.o dcn10_optc.o \ |
27 | dcn10_hubp.o dcn10_mpc.o \ | 27 | dcn10_hubp.o dcn10_mpc.o dcn10_clk_mgr.o \ |
28 | dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \ | 28 | dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \ |
29 | dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o | 29 | dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o |
30 | 30 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c new file mode 100644 index 000000000000..20f531d27e2b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c | |||
@@ -0,0 +1,379 @@ | |||
1 | /* | ||
2 | * Copyright 2018 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: AMD | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #include "dcn10_clk_mgr.h" | ||
27 | |||
28 | #include "reg_helper.h" | ||
29 | #include "core_types.h" | ||
30 | |||
31 | #define TO_DCE_CLK_MGR(clocks)\ | ||
32 | container_of(clocks, struct dce_clk_mgr, base) | ||
33 | |||
34 | #define REG(reg) \ | ||
35 | (clk_mgr_dce->regs->reg) | ||
36 | |||
37 | #undef FN | ||
38 | #define FN(reg_name, field_name) \ | ||
39 | clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name | ||
40 | |||
41 | #define CTX \ | ||
42 | clk_mgr_dce->base.ctx | ||
43 | #define DC_LOGGER \ | ||
44 | clk_mgr->ctx->logger | ||
45 | |||
46 | void dcn1_pplib_apply_display_requirements( | ||
47 | struct dc *dc, | ||
48 | struct dc_state *context) | ||
49 | { | ||
50 | struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; | ||
51 | |||
52 | pp_display_cfg->min_engine_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz; | ||
53 | pp_display_cfg->min_memory_clock_khz = dc->res_pool->clk_mgr->clks.fclk_khz; | ||
54 | pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz; | ||
55 | pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz; | ||
56 | pp_display_cfg->min_dcfclock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz; | ||
57 | pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz; | ||
58 | dce110_fill_display_configs(context, pp_display_cfg); | ||
59 | |||
60 | dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); | ||
61 | } | ||
62 | |||
63 | static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks) | ||
64 | { | ||
65 | bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz; | ||
66 | bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz; | ||
67 | int disp_clk_threshold = new_clocks->max_supported_dppclk_khz; | ||
68 | bool cur_dpp_div = clk_mgr->clks.dispclk_khz > clk_mgr->clks.dppclk_khz; | ||
69 | |||
71 | /* increasing clock: current div-by-2 is off, requested div-by-2 is on */ | ||
71 | if (dispclk_increase) { | ||
73 | /* already divided by 2, no need to reach target clk in two steps */ | ||
73 | if (cur_dpp_div) | ||
74 | return new_clocks->dispclk_khz; | ||
75 | |||
76 | /* requested disp clk is lower than the maximum supported dpp clk, | ||
77 | * no need to reach target clk with two steps. | ||
78 | */ | ||
79 | if (new_clocks->dispclk_khz <= disp_clk_threshold) | ||
80 | return new_clocks->dispclk_khz; | ||
81 | |||
83 | /* requested dpp clk is not divided by 2, still within threshold */ | ||
83 | if (!request_dpp_div) | ||
84 | return new_clocks->dispclk_khz; | ||
85 | |||
86 | } else { | ||
87 | /* decreasing clock: current dppclk is divided by 2, | ||
88 | * requested dppclk is not divided by 2. | ||
89 | */ | ||
90 | |||
91 | /* current dpp clk is not divided by 2, no need to ramp */ | ||
92 | if (!cur_dpp_div) | ||
93 | return new_clocks->dispclk_khz; | ||
94 | |||
95 | /* current disp clk is lower than current maximum dpp clk, | ||
96 | * no need to ramp | ||
97 | */ | ||
98 | if (clk_mgr->clks.dispclk_khz <= disp_clk_threshold) | ||
99 | return new_clocks->dispclk_khz; | ||
100 | |||
101 | /* requested dpp clk needs to be divided by 2 */ | ||
102 | if (request_dpp_div) | ||
103 | return new_clocks->dispclk_khz; | ||
104 | } | ||
105 | |||
106 | return disp_clk_threshold; | ||
107 | } | ||
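
dcn1_determine_dppclk_threshold returns the target dispclk whenever one step suffices and the max-supported-dppclk threshold when the DPP divide-by-2 state must flip mid-ramp. A condensed standalone model of the same decision, with one two-step case.

#include <stdbool.h>
#include <stdio.h>

struct clks {
	int dispclk_khz;
	int dppclk_khz;
	int max_supported_dppclk_khz;
};

static int dppclk_threshold(struct clks cur, struct clks new)
{
	bool request_div = new.dispclk_khz > new.dppclk_khz;
	bool cur_div = cur.dispclk_khz > cur.dppclk_khz;
	bool increase = new.dispclk_khz > cur.dispclk_khz;
	int threshold = new.max_supported_dppclk_khz;

	if (increase) {
		/* one step if already divided, under threshold, or no div needed */
		if (cur_div || new.dispclk_khz <= threshold || !request_div)
			return new.dispclk_khz;
	} else {
		/* one step if not divided, under threshold, or div stays on */
		if (!cur_div || cur.dispclk_khz <= threshold || request_div)
			return new.dispclk_khz;
	}

	return threshold; /* two steps: ramp to the threshold first */
}

int main(void)
{
	struct clks cur = { 400000, 400000, 600000 };
	struct clks new = { 800000, 400000, 600000 };

	printf("%d kHz\n", dppclk_threshold(cur, new)); /* 600000: two-step */
	return 0;
}
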
108 | |||
109 | static void dcn1_ramp_up_dispclk_with_dpp(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks) | ||
110 | { | ||
111 | struct dc *dc = clk_mgr->ctx->dc; | ||
112 | int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(clk_mgr, new_clocks); | ||
113 | bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz; | ||
114 | int i; | ||
115 | |||
116 | /* set disp clk to dpp clk threshold */ | ||
117 | dce112_set_clock(clk_mgr, dispclk_to_dpp_threshold); | ||
118 | |||
119 | /* update request dpp clk division option */ | ||
120 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | ||
121 | struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; | ||
122 | |||
123 | if (!pipe_ctx->plane_state) | ||
124 | continue; | ||
125 | |||
126 | pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control( | ||
127 | pipe_ctx->plane_res.dpp, | ||
128 | request_dpp_div, | ||
129 | true); | ||
130 | } | ||
131 | |||
132 | /* if the target clk differs from the dppclk threshold, set it now */ | ||
133 | if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz) | ||
134 | dce112_set_clock(clk_mgr, new_clocks->dispclk_khz); | ||
135 | |||
136 | clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz; | ||
137 | clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz; | ||
138 | clk_mgr->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz; | ||
139 | } | ||
140 | |||
141 | static int get_active_display_cnt( | ||
142 | struct dc *dc, | ||
143 | struct dc_state *context) | ||
144 | { | ||
145 | int i, display_count; | ||
146 | |||
147 | display_count = 0; | ||
148 | for (i = 0; i < context->stream_count; i++) { | ||
149 | const struct dc_stream_state *stream = context->streams[i]; | ||
150 | |||
151 | /* | ||
152 | * Only notify active stream or virtual stream. | ||
153 | * Need to notify virtual stream to work around | ||
154 | * headless case. HPD does not fire when system is in | ||
155 | * S0i2. | ||
156 | */ | ||
157 | if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL) | ||
158 | display_count++; | ||
159 | } | ||
160 | |||
161 | return display_count; | ||
162 | } | ||
163 | |||
164 | static void notify_deep_sleep_dcfclk_to_smu( | ||
165 | struct pp_smu_funcs_rv *pp_smu, int min_dcef_deep_sleep_clk_khz) | ||
166 | { | ||
167 | int min_dcef_deep_sleep_clk_mhz; //minimum required DCEF Deep Sleep clock in mhz | ||
168 | /* | ||
169 | * if function pointer not set up, this message is | ||
170 | * sent as part of pplib_apply_display_requirements. | ||
171 | * So just return. | ||
172 | */ | ||
173 | if (!pp_smu || !pp_smu->set_min_deep_sleep_dcfclk) | ||
174 | return; | ||
175 | |||
176 | min_dcef_deep_sleep_clk_mhz = (min_dcef_deep_sleep_clk_khz + 999) / 1000; //Round up | ||
177 | pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, min_dcef_deep_sleep_clk_mhz); | ||
178 | } | ||
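
The (min_dcef_deep_sleep_clk_khz + 999) / 1000 above is round-up integer division, so the SMU is never asked for less deep-sleep clock than the display actually needs. A tiny demonstration of the idiom.

#include <stdio.h>

static int khz_to_mhz_round_up(int khz)
{
	return (khz + 999) / 1000; /* ceil(khz / 1000) for non-negative khz */
}

int main(void)
{
	printf("%d\n", khz_to_mhz_round_up(300000)); /* 300 */
	printf("%d\n", khz_to_mhz_round_up(300001)); /* 301, not rounded down */
	return 0;
}
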
179 | |||
180 | static void notify_hard_min_dcfclk_to_smu( | ||
181 | struct pp_smu_funcs_rv *pp_smu, int min_dcf_clk_khz) | ||
182 | { | ||
183 | int min_dcf_clk_mhz; //minimum required DCF clock in mhz | ||
184 | |||
185 | /* | ||
186 | * if function pointer not set up, this message is | ||
187 | * sent as part of pplib_apply_display_requirements. | ||
188 | * So just return. | ||
189 | */ | ||
190 | if (!pp_smu || !pp_smu->set_hard_min_dcfclk_by_freq) | ||
191 | return; | ||
192 | |||
193 | min_dcf_clk_mhz = min_dcf_clk_khz / 1000; | ||
194 | |||
195 | pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, min_dcf_clk_mhz); | ||
196 | } | ||
197 | |||
198 | static void notify_hard_min_fclk_to_smu( | ||
199 | struct pp_smu_funcs_rv *pp_smu, int min_f_clk_khz) | ||
200 | { | ||
201 | int min_f_clk_mhz; //minimum required F clock in mhz | ||
202 | |||
203 | /* | ||
204 | * if function pointer not set up, this message is | ||
205 | * sent as part of pplib_apply_display_requirements. | ||
206 | * So just return. | ||
207 | */ | ||
208 | if (!pp_smu || !pp_smu->set_hard_min_fclk_by_freq) | ||
209 | return; | ||
210 | |||
211 | min_f_clk_mhz = min_f_clk_khz / 1000; | ||
212 | |||
213 | pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, min_f_clk_mhz); | ||
214 | } | ||
215 | |||
216 | static void dcn1_update_clocks(struct clk_mgr *clk_mgr, | ||
217 | struct dc_state *context, | ||
218 | bool safe_to_lower) | ||
219 | { | ||
220 | struct dc *dc = clk_mgr->ctx->dc; | ||
221 | struct dc_clocks *new_clocks = &context->bw.dcn.clk; | ||
222 | struct pp_smu_display_requirement_rv *smu_req_cur = | ||
223 | &dc->res_pool->pp_smu_req; | ||
224 | struct pp_smu_display_requirement_rv smu_req = *smu_req_cur; | ||
225 | struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu; | ||
226 | struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; | ||
227 | bool send_request_to_increase = false; | ||
228 | bool send_request_to_lower = false; | ||
229 | int display_count; | ||
230 | |||
231 | bool enter_display_off = false; | ||
232 | |||
233 | display_count = get_active_display_cnt(dc, context); | ||
234 | |||
235 | if (display_count == 0) | ||
236 | enter_display_off = true; | ||
237 | |||
238 | if (enter_display_off == safe_to_lower) { | ||
239 | /* | ||
240 | * Notify the SMU of the active display count. | ||
241 | * If the function pointer is not set up, this message is | ||
242 | * sent as part of pplib_apply_display_requirements. | ||
243 | */ | ||
244 | if (pp_smu->set_display_count) | ||
245 | pp_smu->set_display_count(&pp_smu->pp_smu, display_count); | ||
246 | else | ||
247 | smu_req.display_count = display_count; | ||
248 | |||
249 | } | ||
250 | |||
251 | if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz | ||
252 | || new_clocks->phyclk_khz > clk_mgr->clks.phyclk_khz | ||
253 | || new_clocks->fclk_khz > clk_mgr->clks.fclk_khz | ||
254 | || new_clocks->dcfclk_khz > clk_mgr->clks.dcfclk_khz) | ||
255 | send_request_to_increase = true; | ||
256 | |||
257 | if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) { | ||
258 | clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz; | ||
259 | |||
260 | send_request_to_lower = true; | ||
261 | } | ||
262 | |||
263 | // F Clock | ||
264 | if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) { | ||
265 | clk_mgr->clks.fclk_khz = new_clocks->fclk_khz; | ||
266 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK; | ||
267 | clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz; | ||
268 | smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000; | ||
269 | |||
270 | notify_hard_min_fclk_to_smu(pp_smu, new_clocks->fclk_khz); | ||
271 | |||
272 | send_request_to_lower = true; | ||
273 | } | ||
274 | |||
275 | //DCF Clock | ||
276 | if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) { | ||
277 | clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz; | ||
278 | smu_req.hard_min_dcefclk_mhz = new_clocks->dcfclk_khz / 1000; | ||
279 | |||
280 | send_request_to_lower = true; | ||
281 | } | ||
282 | |||
283 | if (should_set_clock(safe_to_lower, | ||
284 | new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) { | ||
285 | clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz; | ||
286 | smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz / 1000; | ||
287 | |||
288 | send_request_to_lower = true; | ||
289 | } | ||
290 | |||
291 | /* raise dcf clk before dpp clk so that there is | ||
292 | * enough voltage to run the dpp clk | ||
293 | */ | ||
294 | if (send_request_to_increase) { | ||
295 | /*use dcfclk to request voltage*/ | ||
296 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK; | ||
297 | clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); | ||
298 | |||
299 | notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz); | ||
300 | |||
301 | if (pp_smu->set_display_requirement) | ||
302 | pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); | ||
303 | |||
304 | notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz); | ||
305 | dcn1_pplib_apply_display_requirements(dc, context); | ||
306 | } | ||
307 | |||
308 | /* dcn1 dppclk is tied to dispclk */ | ||
309 | /* program dispclk even when unchanged (==) as a w/a for sleep/resume clock ramping issues */ | ||
310 | if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz) | ||
311 | || new_clocks->dispclk_khz == clk_mgr->clks.dispclk_khz) { | ||
312 | dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks); | ||
313 | clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz; | ||
314 | |||
315 | send_request_to_lower = true; | ||
316 | } | ||
317 | |||
318 | if (!send_request_to_increase && send_request_to_lower) { | ||
319 | /*use dcfclk to request voltage*/ | ||
320 | clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK; | ||
321 | clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks); | ||
322 | |||
323 | notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz); | ||
324 | |||
325 | if (pp_smu->set_display_requirement) | ||
326 | pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req); | ||
327 | |||
328 | notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz); | ||
329 | dcn1_pplib_apply_display_requirements(dc, context); | ||
330 | } | ||
331 | |||
332 | |||
333 | *smu_req_cur = smu_req; | ||
334 | } | ||
335 | |||
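The raise-before-lower ordering above hinges on should_set_clock(): a clock may always be raised ahead of a new state, but is only lowered once the caller marks the transition safe. A minimal sketch of the assumed helper semantics (the shared implementation lives in dce_clk_mgr.c):

	static bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
	{
		/* raising is always allowed; lowering only when safe_to_lower */
		return (safe_to_lower && calc_clk < cur_clk) || (calc_clk > cur_clk);
	}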
336 | static const struct clk_mgr_funcs dcn1_funcs = { | ||
337 | .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, | ||
338 | .update_clocks = dcn1_update_clocks | ||
339 | }; | ||
340 | |||
341 | struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx) | ||
342 | { | ||
343 | struct dc_debug_options *debug = &ctx->dc->debug; | ||
344 | struct dc_bios *bp = ctx->dc_bios; | ||
345 | struct dc_firmware_info fw_info = { { 0 } }; | ||
346 | struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL); | ||
347 | |||
348 | if (clk_mgr_dce == NULL) { | ||
349 | BREAK_TO_DEBUGGER(); | ||
350 | return NULL; | ||
351 | } | ||
352 | |||
353 | clk_mgr_dce->base.ctx = ctx; | ||
354 | clk_mgr_dce->base.funcs = &dcn1_funcs; | ||
355 | |||
356 | clk_mgr_dce->dfs_bypass_disp_clk = 0; | ||
357 | |||
358 | clk_mgr_dce->dprefclk_ss_percentage = 0; | ||
359 | clk_mgr_dce->dprefclk_ss_divider = 1000; | ||
360 | clk_mgr_dce->ss_on_dprefclk = false; | ||
361 | |||
362 | clk_mgr_dce->dprefclk_khz = 600000; | ||
363 | if (bp->integrated_info) | ||
364 | clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; | ||
365 | if (clk_mgr_dce->dentist_vco_freq_khz == 0) { | ||
366 | bp->funcs->get_firmware_info(bp, &fw_info); | ||
367 | clk_mgr_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq; | ||
368 | if (clk_mgr_dce->dentist_vco_freq_khz == 0) | ||
369 | clk_mgr_dce->dentist_vco_freq_khz = 3600000; | ||
370 | } | ||
371 | |||
372 | if (!debug->disable_dfs_bypass && bp->integrated_info) | ||
373 | if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE) | ||
374 | clk_mgr_dce->dfs_bypass_enabled = true; | ||
375 | |||
376 | dce_clock_read_ss_info(clk_mgr_dce); | ||
377 | |||
378 | return &clk_mgr_dce->base; | ||
379 | } | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h new file mode 100644 index 000000000000..9dbaf6578006 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * Copyright 2018 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: AMD | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #ifndef __DCN10_CLK_MGR_H__ | ||
27 | #define __DCN10_CLK_MGR_H__ | ||
28 | |||
29 | #include "../dce/dce_clk_mgr.h" | ||
30 | |||
31 | void dcn1_pplib_apply_display_requirements( | ||
32 | struct dc *dc, | ||
33 | struct dc_state *context); | ||
34 | |||
35 | struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx); | ||
36 | |||
37 | #endif //__DCN10_CLK_MGR_H__ | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index 5d95a997fd9f..3eea44092a04 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | |||
@@ -71,39 +71,39 @@ void cm_helper_program_xfer_func( | |||
71 | unsigned int i = 0; | 71 | unsigned int i = 0; |
72 | 72 | ||
73 | REG_SET_2(reg->start_cntl_b, 0, | 73 | REG_SET_2(reg->start_cntl_b, 0, |
74 | exp_region_start, params->arr_points[0].custom_float_x, | 74 | exp_region_start, params->corner_points[0].blue.custom_float_x, |
75 | exp_resion_start_segment, 0); | 75 | exp_resion_start_segment, 0); |
76 | REG_SET_2(reg->start_cntl_g, 0, | 76 | REG_SET_2(reg->start_cntl_g, 0, |
77 | exp_region_start, params->arr_points[0].custom_float_x, | 77 | exp_region_start, params->corner_points[0].green.custom_float_x, |
78 | exp_resion_start_segment, 0); | 78 | exp_resion_start_segment, 0); |
79 | REG_SET_2(reg->start_cntl_r, 0, | 79 | REG_SET_2(reg->start_cntl_r, 0, |
80 | exp_region_start, params->arr_points[0].custom_float_x, | 80 | exp_region_start, params->corner_points[0].red.custom_float_x, |
81 | exp_resion_start_segment, 0); | 81 | exp_resion_start_segment, 0); |
82 | 82 | ||
83 | REG_SET(reg->start_slope_cntl_b, 0, | 83 | REG_SET(reg->start_slope_cntl_b, 0, |
84 | field_region_linear_slope, params->arr_points[0].custom_float_slope); | 84 | field_region_linear_slope, params->corner_points[0].blue.custom_float_slope); |
85 | REG_SET(reg->start_slope_cntl_g, 0, | 85 | REG_SET(reg->start_slope_cntl_g, 0, |
86 | field_region_linear_slope, params->arr_points[0].custom_float_slope); | 86 | field_region_linear_slope, params->corner_points[0].green.custom_float_slope); |
87 | REG_SET(reg->start_slope_cntl_r, 0, | 87 | REG_SET(reg->start_slope_cntl_r, 0, |
88 | field_region_linear_slope, params->arr_points[0].custom_float_slope); | 88 | field_region_linear_slope, params->corner_points[0].red.custom_float_slope); |
89 | 89 | ||
90 | REG_SET(reg->start_end_cntl1_b, 0, | 90 | REG_SET(reg->start_end_cntl1_b, 0, |
91 | field_region_end, params->arr_points[1].custom_float_x); | 91 | field_region_end, params->corner_points[1].blue.custom_float_x); |
92 | REG_SET_2(reg->start_end_cntl2_b, 0, | 92 | REG_SET_2(reg->start_end_cntl2_b, 0, |
93 | field_region_end_slope, params->arr_points[1].custom_float_slope, | 93 | field_region_end_slope, params->corner_points[1].blue.custom_float_slope, |
94 | field_region_end_base, params->arr_points[1].custom_float_y); | 94 | field_region_end_base, params->corner_points[1].blue.custom_float_y); |
95 | 95 | ||
96 | REG_SET(reg->start_end_cntl1_g, 0, | 96 | REG_SET(reg->start_end_cntl1_g, 0, |
97 | field_region_end, params->arr_points[1].custom_float_x); | 97 | field_region_end, params->corner_points[1].green.custom_float_x); |
98 | REG_SET_2(reg->start_end_cntl2_g, 0, | 98 | REG_SET_2(reg->start_end_cntl2_g, 0, |
99 | field_region_end_slope, params->arr_points[1].custom_float_slope, | 99 | field_region_end_slope, params->corner_points[1].green.custom_float_slope, |
100 | field_region_end_base, params->arr_points[1].custom_float_y); | 100 | field_region_end_base, params->corner_points[1].green.custom_float_y); |
101 | 101 | ||
102 | REG_SET(reg->start_end_cntl1_r, 0, | 102 | REG_SET(reg->start_end_cntl1_r, 0, |
103 | field_region_end, params->arr_points[1].custom_float_x); | 103 | field_region_end, params->corner_points[1].red.custom_float_x); |
104 | REG_SET_2(reg->start_end_cntl2_r, 0, | 104 | REG_SET_2(reg->start_end_cntl2_r, 0, |
105 | field_region_end_slope, params->arr_points[1].custom_float_slope, | 105 | field_region_end_slope, params->corner_points[1].red.custom_float_slope, |
106 | field_region_end_base, params->arr_points[1].custom_float_y); | 106 | field_region_end_base, params->corner_points[1].red.custom_float_y); |
107 | 107 | ||
108 | for (reg_region_cur = reg->region_start; | 108 | for (reg_region_cur = reg->region_start; |
109 | reg_region_cur <= reg->region_end; | 109 | reg_region_cur <= reg->region_end; |
@@ -127,7 +127,7 @@ void cm_helper_program_xfer_func( | |||
127 | 127 | ||
128 | bool cm_helper_convert_to_custom_float( | 128 | bool cm_helper_convert_to_custom_float( |
129 | struct pwl_result_data *rgb_resulted, | 129 | struct pwl_result_data *rgb_resulted, |
130 | struct curve_points *arr_points, | 130 | struct curve_points3 *corner_points, |
131 | uint32_t hw_points_num, | 131 | uint32_t hw_points_num, |
132 | bool fixpoint) | 132 | bool fixpoint) |
133 | { | 133 | { |
@@ -141,20 +141,53 @@ bool cm_helper_convert_to_custom_float( | |||
141 | fmt.mantissa_bits = 12; | 141 | fmt.mantissa_bits = 12; |
142 | fmt.sign = false; | 142 | fmt.sign = false; |
143 | 143 | ||
144 | if (!convert_to_custom_float_format(arr_points[0].x, &fmt, | 144 | /* corner_points[0] - beginning base, slope offset for R,G,B |
145 | &arr_points[0].custom_float_x)) { | 145 | * corner_points[1] - end base, slope offset for R,G,B |
146 | */ | ||
147 | if (!convert_to_custom_float_format(corner_points[0].red.x, &fmt, | ||
148 | &corner_points[0].red.custom_float_x)) { | ||
149 | BREAK_TO_DEBUGGER(); | ||
150 | return false; | ||
151 | } | ||
152 | if (!convert_to_custom_float_format(corner_points[0].green.x, &fmt, | ||
153 | &corner_points[0].green.custom_float_x)) { | ||
154 | BREAK_TO_DEBUGGER(); | ||
155 | return false; | ||
156 | } | ||
157 | if (!convert_to_custom_float_format(corner_points[0].blue.x, &fmt, | ||
158 | &corner_points[0].blue.custom_float_x)) { | ||
146 | BREAK_TO_DEBUGGER(); | 159 | BREAK_TO_DEBUGGER(); |
147 | return false; | 160 | return false; |
148 | } | 161 | } |
149 | 162 | ||
150 | if (!convert_to_custom_float_format(arr_points[0].offset, &fmt, | 163 | if (!convert_to_custom_float_format(corner_points[0].red.offset, &fmt, |
151 | &arr_points[0].custom_float_offset)) { | 164 | &corner_points[0].red.custom_float_offset)) { |
165 | BREAK_TO_DEBUGGER(); | ||
166 | return false; | ||
167 | } | ||
168 | if (!convert_to_custom_float_format(corner_points[0].green.offset, &fmt, | ||
169 | &corner_points[0].green.custom_float_offset)) { | ||
170 | BREAK_TO_DEBUGGER(); | ||
171 | return false; | ||
172 | } | ||
173 | if (!convert_to_custom_float_format(corner_points[0].blue.offset, &fmt, | ||
174 | &corner_points[0].blue.custom_float_offset)) { | ||
152 | BREAK_TO_DEBUGGER(); | 175 | BREAK_TO_DEBUGGER(); |
153 | return false; | 176 | return false; |
154 | } | 177 | } |
155 | 178 | ||
156 | if (!convert_to_custom_float_format(arr_points[0].slope, &fmt, | 179 | if (!convert_to_custom_float_format(corner_points[0].red.slope, &fmt, |
157 | &arr_points[0].custom_float_slope)) { | 180 | &corner_points[0].red.custom_float_slope)) { |
181 | BREAK_TO_DEBUGGER(); | ||
182 | return false; | ||
183 | } | ||
184 | if (!convert_to_custom_float_format(corner_points[0].green.slope, &fmt, | ||
185 | &corner_points[0].green.custom_float_slope)) { | ||
186 | BREAK_TO_DEBUGGER(); | ||
187 | return false; | ||
188 | } | ||
189 | if (!convert_to_custom_float_format(corner_points[0].blue.slope, &fmt, | ||
190 | &corner_points[0].blue.custom_float_slope)) { | ||
158 | BREAK_TO_DEBUGGER(); | 191 | BREAK_TO_DEBUGGER(); |
159 | return false; | 192 | return false; |
160 | } | 193 | } |
@@ -162,22 +195,59 @@ bool cm_helper_convert_to_custom_float( | |||
162 | fmt.mantissa_bits = 10; | 195 | fmt.mantissa_bits = 10; |
163 | fmt.sign = false; | 196 | fmt.sign = false; |
164 | 197 | ||
165 | if (!convert_to_custom_float_format(arr_points[1].x, &fmt, | 198 | if (!convert_to_custom_float_format(corner_points[1].red.x, &fmt, |
166 | &arr_points[1].custom_float_x)) { | 199 | &corner_points[1].red.custom_float_x)) { |
167 | BREAK_TO_DEBUGGER(); | 200 | BREAK_TO_DEBUGGER(); |
168 | return false; | 201 | return false; |
169 | } | 202 | } |
170 | 203 | if (!convert_to_custom_float_format(corner_points[1].green.x, &fmt, | |
171 | if (fixpoint == true) | 204 | &corner_points[1].green.custom_float_x)) { |
172 | arr_points[1].custom_float_y = dc_fixpt_clamp_u0d14(arr_points[1].y); | 205 | BREAK_TO_DEBUGGER(); |
173 | else if (!convert_to_custom_float_format(arr_points[1].y, &fmt, | 206 | return false; |
174 | &arr_points[1].custom_float_y)) { | 207 | } |
208 | if (!convert_to_custom_float_format(corner_points[1].blue.x, &fmt, | ||
209 | &corner_points[1].blue.custom_float_x)) { | ||
175 | BREAK_TO_DEBUGGER(); | 210 | BREAK_TO_DEBUGGER(); |
176 | return false; | 211 | return false; |
177 | } | 212 | } |
178 | 213 | ||
179 | if (!convert_to_custom_float_format(arr_points[1].slope, &fmt, | 214 | if (fixpoint == true) { |
180 | &arr_points[1].custom_float_slope)) { | 215 | corner_points[1].red.custom_float_y = |
216 | dc_fixpt_clamp_u0d14(corner_points[1].red.y); | ||
217 | corner_points[1].green.custom_float_y = | ||
218 | dc_fixpt_clamp_u0d14(corner_points[1].green.y); | ||
219 | corner_points[1].blue.custom_float_y = | ||
220 | dc_fixpt_clamp_u0d14(corner_points[1].blue.y); | ||
221 | } else { | ||
222 | if (!convert_to_custom_float_format(corner_points[1].red.y, | ||
223 | &fmt, &corner_points[1].red.custom_float_y)) { | ||
224 | BREAK_TO_DEBUGGER(); | ||
225 | return false; | ||
226 | } | ||
227 | if (!convert_to_custom_float_format(corner_points[1].green.y, | ||
228 | &fmt, &corner_points[1].green.custom_float_y)) { | ||
229 | BREAK_TO_DEBUGGER(); | ||
230 | return false; | ||
231 | } | ||
232 | if (!convert_to_custom_float_format(corner_points[1].blue.y, | ||
233 | &fmt, &corner_points[1].blue.custom_float_y)) { | ||
234 | BREAK_TO_DEBUGGER(); | ||
235 | return false; | ||
236 | } | ||
237 | } | ||
238 | |||
239 | if (!convert_to_custom_float_format(corner_points[1].red.slope, &fmt, | ||
240 | &corner_points[1].red.custom_float_slope)) { | ||
241 | BREAK_TO_DEBUGGER(); | ||
242 | return false; | ||
243 | } | ||
244 | if (!convert_to_custom_float_format(corner_points[1].green.slope, &fmt, | ||
245 | &corner_points[1].green.custom_float_slope)) { | ||
246 | BREAK_TO_DEBUGGER(); | ||
247 | return false; | ||
248 | } | ||
249 | if (!convert_to_custom_float_format(corner_points[1].blue.slope, &fmt, | ||
250 | &corner_points[1].blue.custom_float_slope)) { | ||
181 | BREAK_TO_DEBUGGER(); | 251 | BREAK_TO_DEBUGGER(); |
182 | return false; | 252 | return false; |
183 | } | 253 | } |
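The per-channel conversions above are deliberately unrolled. Using only the calls and types already visible in this patch, the triplication could be folded into a helper along these lines (hypothetical, not part of the change; it mirrors the corner_points[0] pattern of x/offset/slope):

	static bool convert_corner_point(struct curve_points *p,
			const struct custom_float_format *fmt)
	{
		/* convert one channel's corner point into its custom-float fields */
		return convert_to_custom_float_format(p->x, fmt, &p->custom_float_x) &&
		       convert_to_custom_float_format(p->offset, fmt, &p->custom_float_offset) &&
		       convert_to_custom_float_format(p->slope, fmt, &p->custom_float_slope);
	}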
@@ -242,15 +312,10 @@ bool cm_helper_translate_curve_to_hw_format( | |||
242 | const struct dc_transfer_func *output_tf, | 312 | const struct dc_transfer_func *output_tf, |
243 | struct pwl_params *lut_params, bool fixpoint) | 313 | struct pwl_params *lut_params, bool fixpoint) |
244 | { | 314 | { |
245 | struct curve_points *arr_points; | 315 | struct curve_points3 *corner_points; |
246 | struct pwl_result_data *rgb_resulted; | 316 | struct pwl_result_data *rgb_resulted; |
247 | struct pwl_result_data *rgb; | 317 | struct pwl_result_data *rgb; |
248 | struct pwl_result_data *rgb_plus_1; | 318 | struct pwl_result_data *rgb_plus_1; |
249 | struct fixed31_32 y_r; | ||
250 | struct fixed31_32 y_g; | ||
251 | struct fixed31_32 y_b; | ||
252 | struct fixed31_32 y1_min; | ||
253 | struct fixed31_32 y3_max; | ||
254 | 319 | ||
255 | int32_t region_start, region_end; | 320 | int32_t region_start, region_end; |
256 | int32_t i; | 321 | int32_t i; |
@@ -261,14 +326,14 @@ bool cm_helper_translate_curve_to_hw_format( | |||
261 | 326 | ||
262 | PERF_TRACE(); | 327 | PERF_TRACE(); |
263 | 328 | ||
264 | arr_points = lut_params->arr_points; | 329 | corner_points = lut_params->corner_points; |
265 | rgb_resulted = lut_params->rgb_resulted; | 330 | rgb_resulted = lut_params->rgb_resulted; |
266 | hw_points = 0; | 331 | hw_points = 0; |
267 | 332 | ||
268 | memset(lut_params, 0, sizeof(struct pwl_params)); | 333 | memset(lut_params, 0, sizeof(struct pwl_params)); |
269 | memset(seg_distr, 0, sizeof(seg_distr)); | 334 | memset(seg_distr, 0, sizeof(seg_distr)); |
270 | 335 | ||
271 | if (output_tf->tf == TRANSFER_FUNCTION_PQ) { | 336 | if (output_tf->tf == TRANSFER_FUNCTION_PQ || output_tf->tf == TRANSFER_FUNCTION_GAMMA22) { |
272 | /* 32 segments | 337 | /* 32 segments |
273 | * segments are from 2^-25 to 2^7 | 338 | * segments are from 2^-25 to 2^7 |
274 | */ | 339 | */ |
@@ -327,31 +392,37 @@ bool cm_helper_translate_curve_to_hw_format( | |||
327 | rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; | 392 | rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; |
328 | rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; | 393 | rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; |
329 | 394 | ||
330 | arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2), | 395 | // All 3 color channels have the same x |
396 | corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), | ||
331 | dc_fixpt_from_int(region_start)); | 397 | dc_fixpt_from_int(region_start)); |
332 | arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2), | 398 | corner_points[0].green.x = corner_points[0].red.x; |
333 | dc_fixpt_from_int(region_end)); | 399 | corner_points[0].blue.x = corner_points[0].red.x; |
334 | 400 | ||
335 | y_r = rgb_resulted[0].red; | 401 | corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), |
336 | y_g = rgb_resulted[0].green; | 402 | dc_fixpt_from_int(region_end)); |
337 | y_b = rgb_resulted[0].blue; | 403 | corner_points[1].green.x = corner_points[1].red.x; |
404 | corner_points[1].blue.x = corner_points[1].red.x; | ||
338 | 405 | ||
339 | y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b)); | 406 | corner_points[0].red.y = rgb_resulted[0].red; |
407 | corner_points[0].green.y = rgb_resulted[0].green; | ||
408 | corner_points[0].blue.y = rgb_resulted[0].blue; | ||
340 | 409 | ||
341 | arr_points[0].y = y1_min; | 410 | corner_points[0].red.slope = dc_fixpt_div(corner_points[0].red.y, |
342 | arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x); | 411 | corner_points[0].red.x); |
343 | y_r = rgb_resulted[hw_points - 1].red; | 412 | corner_points[0].green.slope = dc_fixpt_div(corner_points[0].green.y, |
344 | y_g = rgb_resulted[hw_points - 1].green; | 413 | corner_points[0].green.x); |
345 | y_b = rgb_resulted[hw_points - 1].blue; | 414 | corner_points[0].blue.slope = dc_fixpt_div(corner_points[0].blue.y, |
415 | corner_points[0].blue.x); | ||
346 | 416 | ||
347 | /* see comment above, m_arrPoints[1].y should be the Y value for the | 417 | /* see comment above, m_arrPoints[1].y should be the Y value for the |
348 | * region end (m_numOfHwPoints), not the last HW point (m_numOfHwPoints - 1) | 418 | * region end (m_numOfHwPoints), not the last HW point (m_numOfHwPoints - 1) |
349 | */ | 419 | */ |
350 | y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b)); | 420 | corner_points[1].red.y = rgb_resulted[hw_points - 1].red; |
351 | 421 | corner_points[1].green.y = rgb_resulted[hw_points - 1].green; | |
352 | arr_points[1].y = y3_max; | 422 | corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue; |
353 | 423 | corner_points[1].red.slope = dc_fixpt_zero; | |
354 | arr_points[1].slope = dc_fixpt_zero; | 424 | corner_points[1].green.slope = dc_fixpt_zero; |
425 | corner_points[1].blue.slope = dc_fixpt_zero; | ||
355 | 426 | ||
356 | if (output_tf->tf == TRANSFER_FUNCTION_PQ) { | 427 | if (output_tf->tf == TRANSFER_FUNCTION_PQ) { |
357 | /* for PQ, we want to have a straight line from last HW X point, | 428 | /* for PQ, we want to have a straight line from last HW X point, |
@@ -360,9 +431,15 @@ bool cm_helper_translate_curve_to_hw_format( | |||
360 | const struct fixed31_32 end_value = | 431 | const struct fixed31_32 end_value = |
361 | dc_fixpt_from_int(125); | 432 | dc_fixpt_from_int(125); |
362 | 433 | ||
363 | arr_points[1].slope = dc_fixpt_div( | 434 | corner_points[1].red.slope = dc_fixpt_div( |
364 | dc_fixpt_sub(dc_fixpt_one, arr_points[1].y), | 435 | dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y), |
365 | dc_fixpt_sub(end_value, arr_points[1].x)); | 436 | dc_fixpt_sub(end_value, corner_points[1].red.x)); |
437 | corner_points[1].green.slope = dc_fixpt_div( | ||
438 | dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y), | ||
439 | dc_fixpt_sub(end_value, corner_points[1].green.x)); | ||
440 | corner_points[1].blue.slope = dc_fixpt_div( | ||
441 | dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y), | ||
442 | dc_fixpt_sub(end_value, corner_points[1].blue.x)); | ||
366 | } | 443 | } |
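On the end_value of 125: DC's fixed31_32 luminance scale takes 1.0 as 80 nits (the same 80-nit reference appears in set_hdr_multiplier() in dcn10_hw_sequencer.c below), so the 10000-nit PQ peak sits at 10000 / 80 = 125.0. The straight-line segment programmed above is then, per channel:

	slope = (1.0 - y_end) / (125.0 - x_end)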
367 | 444 | ||
368 | lut_params->hw_points_num = hw_points; | 445 | lut_params->hw_points_num = hw_points; |
@@ -411,7 +488,7 @@ bool cm_helper_translate_curve_to_hw_format( | |||
411 | ++i; | 488 | ++i; |
412 | } | 489 | } |
413 | cm_helper_convert_to_custom_float(rgb_resulted, | 490 | cm_helper_convert_to_custom_float(rgb_resulted, |
414 | lut_params->arr_points, | 491 | lut_params->corner_points, |
415 | hw_points, fixpoint); | 492 | hw_points, fixpoint); |
416 | 493 | ||
417 | return true; | 494 | return true; |
@@ -424,15 +501,10 @@ bool cm_helper_translate_curve_to_degamma_hw_format( | |||
424 | const struct dc_transfer_func *output_tf, | 501 | const struct dc_transfer_func *output_tf, |
425 | struct pwl_params *lut_params) | 502 | struct pwl_params *lut_params) |
426 | { | 503 | { |
427 | struct curve_points *arr_points; | 504 | struct curve_points3 *corner_points; |
428 | struct pwl_result_data *rgb_resulted; | 505 | struct pwl_result_data *rgb_resulted; |
429 | struct pwl_result_data *rgb; | 506 | struct pwl_result_data *rgb; |
430 | struct pwl_result_data *rgb_plus_1; | 507 | struct pwl_result_data *rgb_plus_1; |
431 | struct fixed31_32 y_r; | ||
432 | struct fixed31_32 y_g; | ||
433 | struct fixed31_32 y_b; | ||
434 | struct fixed31_32 y1_min; | ||
435 | struct fixed31_32 y3_max; | ||
436 | 508 | ||
437 | int32_t region_start, region_end; | 509 | int32_t region_start, region_end; |
438 | int32_t i; | 510 | int32_t i; |
@@ -443,7 +515,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format( | |||
443 | 515 | ||
444 | PERF_TRACE(); | 516 | PERF_TRACE(); |
445 | 517 | ||
446 | arr_points = lut_params->arr_points; | 518 | corner_points = lut_params->corner_points; |
447 | rgb_resulted = lut_params->rgb_resulted; | 519 | rgb_resulted = lut_params->rgb_resulted; |
448 | hw_points = 0; | 520 | hw_points = 0; |
449 | 521 | ||
@@ -489,31 +561,28 @@ bool cm_helper_translate_curve_to_degamma_hw_format( | |||
489 | rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; | 561 | rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; |
490 | rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; | 562 | rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; |
491 | 563 | ||
492 | arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2), | 564 | corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), |
493 | dc_fixpt_from_int(region_start)); | 565 | dc_fixpt_from_int(region_start)); |
494 | arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2), | 566 | corner_points[0].green.x = corner_points[0].red.x; |
567 | corner_points[0].blue.x = corner_points[0].red.x; | ||
568 | corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2), | ||
495 | dc_fixpt_from_int(region_end)); | 569 | dc_fixpt_from_int(region_end)); |
570 | corner_points[1].green.x = corner_points[1].red.x; | ||
571 | corner_points[1].blue.x = corner_points[1].red.x; | ||
496 | 572 | ||
497 | y_r = rgb_resulted[0].red; | 573 | corner_points[0].red.y = rgb_resulted[0].red; |
498 | y_g = rgb_resulted[0].green; | 574 | corner_points[0].green.y = rgb_resulted[0].green; |
499 | y_b = rgb_resulted[0].blue; | 575 | corner_points[0].blue.y = rgb_resulted[0].blue; |
500 | |||
501 | y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b)); | ||
502 | |||
503 | arr_points[0].y = y1_min; | ||
504 | arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x); | ||
505 | y_r = rgb_resulted[hw_points - 1].red; | ||
506 | y_g = rgb_resulted[hw_points - 1].green; | ||
507 | y_b = rgb_resulted[hw_points - 1].blue; | ||
508 | 576 | ||
509 | /* see comment above, m_arrPoints[1].y should be the Y value for the | 577 | /* see comment above, m_arrPoints[1].y should be the Y value for the |
510 | * region end (m_numOfHwPoints), not the last HW point (m_numOfHwPoints - 1) | 578 | * region end (m_numOfHwPoints), not the last HW point (m_numOfHwPoints - 1) |
511 | */ | 579 | */ |
512 | y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b)); | 580 | corner_points[1].red.y = rgb_resulted[hw_points - 1].red; |
513 | 581 | corner_points[1].green.y = rgb_resulted[hw_points - 1].green; | |
514 | arr_points[1].y = y3_max; | 582 | corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue; |
515 | 583 | corner_points[1].red.slope = dc_fixpt_zero; | |
516 | arr_points[1].slope = dc_fixpt_zero; | 584 | corner_points[1].green.slope = dc_fixpt_zero; |
585 | corner_points[1].blue.slope = dc_fixpt_zero; | ||
517 | 586 | ||
518 | if (output_tf->tf == TRANSFER_FUNCTION_PQ) { | 587 | if (output_tf->tf == TRANSFER_FUNCTION_PQ) { |
519 | /* for PQ, we want to have a straight line from last HW X point, | 588 | /* for PQ, we want to have a straight line from last HW X point, |
@@ -522,9 +591,15 @@ bool cm_helper_translate_curve_to_degamma_hw_format( | |||
522 | const struct fixed31_32 end_value = | 591 | const struct fixed31_32 end_value = |
523 | dc_fixpt_from_int(125); | 592 | dc_fixpt_from_int(125); |
524 | 593 | ||
525 | arr_points[1].slope = dc_fixpt_div( | 594 | corner_points[1].red.slope = dc_fixpt_div( |
526 | dc_fixpt_sub(dc_fixpt_one, arr_points[1].y), | 595 | dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y), |
527 | dc_fixpt_sub(end_value, arr_points[1].x)); | 596 | dc_fixpt_sub(end_value, corner_points[1].red.x)); |
597 | corner_points[1].green.slope = dc_fixpt_div( | ||
598 | dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y), | ||
599 | dc_fixpt_sub(end_value, corner_points[1].green.x)); | ||
600 | corner_points[1].blue.slope = dc_fixpt_div( | ||
601 | dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y), | ||
602 | dc_fixpt_sub(end_value, corner_points[1].blue.x)); | ||
528 | } | 603 | } |
529 | 604 | ||
530 | lut_params->hw_points_num = hw_points; | 605 | lut_params->hw_points_num = hw_points; |
@@ -564,7 +639,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format( | |||
564 | ++i; | 639 | ++i; |
565 | } | 640 | } |
566 | cm_helper_convert_to_custom_float(rgb_resulted, | 641 | cm_helper_convert_to_custom_float(rgb_resulted, |
567 | lut_params->arr_points, | 642 | lut_params->corner_points, |
568 | hw_points, false); | 643 | hw_points, false); |
569 | 644 | ||
570 | return true; | 645 | return true; |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h index 7a531b02871f..5ae4d69391a5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h | |||
@@ -98,7 +98,7 @@ void cm_helper_program_xfer_func( | |||
98 | 98 | ||
99 | bool cm_helper_convert_to_custom_float( | 99 | bool cm_helper_convert_to_custom_float( |
100 | struct pwl_result_data *rgb_resulted, | 100 | struct pwl_result_data *rgb_resulted, |
101 | struct curve_points *arr_points, | 101 | struct curve_points3 *corner_points, |
102 | uint32_t hw_points_num, | 102 | uint32_t hw_points_num, |
103 | bool fixpoint); | 103 | bool fixpoint); |
104 | 104 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 193184affefb..87495dea45ec 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include "dcn10_hubbub.h" | 45 | #include "dcn10_hubbub.h" |
46 | #include "dcn10_cm_common.h" | 46 | #include "dcn10_cm_common.h" |
47 | #include "dc_link_dp.h" | 47 | #include "dc_link_dp.h" |
48 | #include "dccg.h" | ||
48 | 49 | ||
49 | #define DC_LOGGER_INIT(logger) | 50 | #define DC_LOGGER_INIT(logger) |
50 | 51 | ||
@@ -786,7 +787,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc) | |||
786 | &dc->current_state->res_ctx.pipe_ctx[i]; | 787 | &dc->current_state->res_ctx.pipe_ctx[i]; |
787 | if (pipe_ctx != NULL) { | 788 | if (pipe_ctx != NULL) { |
788 | hubp = pipe_ctx->plane_res.hubp; | 789 | hubp = pipe_ctx->plane_res.hubp; |
789 | if (hubp != NULL) { | 790 | if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) { |
790 | if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) { | 791 | if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) { |
791 | /* one pipe underflow, we will reset all the pipes*/ | 792 | /* one pipe underflow, we will reset all the pipes*/ |
792 | need_recover = true; | 793 | need_recover = true; |
@@ -812,7 +813,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc) | |||
812 | if (pipe_ctx != NULL) { | 813 | if (pipe_ctx != NULL) { |
813 | hubp = pipe_ctx->plane_res.hubp; | 814 | hubp = pipe_ctx->plane_res.hubp; |
814 | /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/ | 815 | /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/ |
815 | if (hubp != NULL) | 816 | if (hubp != NULL && hubp->funcs->set_hubp_blank_en) |
816 | hubp->funcs->set_hubp_blank_en(hubp, true); | 817 | hubp->funcs->set_hubp_blank_en(hubp, true); |
817 | } | 818 | } |
818 | } | 819 | } |
@@ -825,7 +826,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc) | |||
825 | if (pipe_ctx != NULL) { | 826 | if (pipe_ctx != NULL) { |
826 | hubp = pipe_ctx->plane_res.hubp; | 827 | hubp = pipe_ctx->plane_res.hubp; |
827 | /*DCHUBP_CNTL:HUBP_DISABLE=1*/ | 828 | /*DCHUBP_CNTL:HUBP_DISABLE=1*/ |
828 | if (hubp != NULL) | 829 | if (hubp != NULL && hubp->funcs->hubp_disable_control) |
829 | hubp->funcs->hubp_disable_control(hubp, true); | 830 | hubp->funcs->hubp_disable_control(hubp, true); |
830 | } | 831 | } |
831 | } | 832 | } |
@@ -835,7 +836,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc) | |||
835 | if (pipe_ctx != NULL) { | 836 | if (pipe_ctx != NULL) { |
836 | hubp = pipe_ctx->plane_res.hubp; | 837 | hubp = pipe_ctx->plane_res.hubp; |
837 | /*DCHUBP_CNTL:HUBP_DISABLE=0*/ | 838 | /*DCHUBP_CNTL:HUBP_DISABLE=0*/ |
838 | if (hubp != NULL) | 839 | if (hubp != NULL && hubp->funcs->hubp_disable_control) |
839 | hubp->funcs->hubp_disable_control(hubp, true); | 840 | hubp->funcs->hubp_disable_control(hubp, true); |
840 | } | 841 | } |
841 | } | 842 | } |
@@ -847,7 +848,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc) | |||
847 | if (pipe_ctx != NULL) { | 848 | if (pipe_ctx != NULL) { |
848 | hubp = pipe_ctx->plane_res.hubp; | 849 | hubp = pipe_ctx->plane_res.hubp; |
849 | /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/ | 850 | /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/ |
850 | if (hubp != NULL) | 851 | if (hubp != NULL && hubp->funcs->set_hubp_blank_en) |
851 | hubp->funcs->set_hubp_blank_en(hubp, true); | 852 | hubp->funcs->set_hubp_blank_en(hubp, true); |
852 | } | 853 | } |
853 | } | 854 | } |
@@ -1126,7 +1127,7 @@ static void dcn10_init_hw(struct dc *dc) | |||
1126 | 1127 | ||
1127 | enable_power_gating_plane(dc->hwseq, true); | 1128 | enable_power_gating_plane(dc->hwseq, true); |
1128 | 1129 | ||
1129 | memset(&dc->res_pool->dccg->clks, 0, sizeof(dc->res_pool->dccg->clks)); | 1130 | memset(&dc->res_pool->clk_mgr->clks, 0, sizeof(dc->res_pool->clk_mgr->clks)); |
1130 | } | 1131 | } |
1131 | 1132 | ||
1132 | static void reset_hw_ctx_wrap( | 1133 | static void reset_hw_ctx_wrap( |
@@ -1603,7 +1604,7 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1, | |||
1603 | } | 1604 | } |
1604 | 1605 | ||
1605 | 1606 | ||
1606 | static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp) | 1607 | void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp) |
1607 | { | 1608 | { |
1608 | struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); | 1609 | struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); |
1609 | struct vm_system_aperture_param apt = { {{ 0 } } }; | 1610 | struct vm_system_aperture_param apt = { {{ 0 } } }; |
@@ -1703,33 +1704,22 @@ static void program_gamut_remap(struct pipe_ctx *pipe_ctx) | |||
1703 | pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust); | 1704 | pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust); |
1704 | } | 1705 | } |
1705 | 1706 | ||
1706 | 1707 | static void dcn10_program_output_csc(struct dc *dc, | |
1707 | static void program_csc_matrix(struct pipe_ctx *pipe_ctx, | 1708 | struct pipe_ctx *pipe_ctx, |
1708 | enum dc_color_space colorspace, | 1709 | enum dc_color_space colorspace, |
1709 | uint16_t *matrix) | 1710 | uint16_t *matrix, |
1711 | int opp_id) | ||
1710 | { | 1712 | { |
1711 | if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) { | 1713 | if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) { |
1712 | if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) | 1714 | if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) |
1713 | pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix); | 1715 | pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix); |
1714 | } else { | 1716 | } else { |
1715 | if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL) | 1717 | if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL) |
1716 | pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace); | 1718 | pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace); |
1717 | } | 1719 | } |
1718 | } | 1720 | } |
1719 | 1721 | ||
1720 | static void dcn10_program_output_csc(struct dc *dc, | 1722 | bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) |
1721 | struct pipe_ctx *pipe_ctx, | ||
1722 | enum dc_color_space colorspace, | ||
1723 | uint16_t *matrix, | ||
1724 | int opp_id) | ||
1725 | { | ||
1726 | if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) | ||
1727 | program_csc_matrix(pipe_ctx, | ||
1728 | colorspace, | ||
1729 | matrix); | ||
1730 | } | ||
1731 | |||
1732 | static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) | ||
1733 | { | 1723 | { |
1734 | if (pipe_ctx->plane_state->visible) | 1724 | if (pipe_ctx->plane_state->visible) |
1735 | return true; | 1725 | return true; |
@@ -1738,7 +1728,7 @@ static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) | |||
1738 | return false; | 1728 | return false; |
1739 | } | 1729 | } |
1740 | 1730 | ||
1741 | static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx) | 1731 | bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx) |
1742 | { | 1732 | { |
1743 | if (pipe_ctx->plane_state->visible) | 1733 | if (pipe_ctx->plane_state->visible) |
1744 | return true; | 1734 | return true; |
@@ -1747,7 +1737,7 @@ static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx) | |||
1747 | return false; | 1737 | return false; |
1748 | } | 1738 | } |
1749 | 1739 | ||
1750 | static bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx) | 1740 | bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx) |
1751 | { | 1741 | { |
1752 | if (pipe_ctx->plane_state->visible) | 1742 | if (pipe_ctx->plane_state->visible) |
1753 | return true; | 1743 | return true; |
@@ -1943,10 +1933,6 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) | |||
1943 | struct mpc *mpc = dc->res_pool->mpc; | 1933 | struct mpc *mpc = dc->res_pool->mpc; |
1944 | struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); | 1934 | struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); |
1945 | 1935 | ||
1946 | |||
1947 | |||
1948 | /* TODO: proper fix once fpga works */ | ||
1949 | |||
1950 | if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { | 1936 | if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) { |
1951 | dcn10_get_hdr_visual_confirm_color( | 1937 | dcn10_get_hdr_visual_confirm_color( |
1952 | pipe_ctx, &blnd_cfg.black_color); | 1938 | pipe_ctx, &blnd_cfg.black_color); |
@@ -2026,8 +2012,6 @@ static void update_scaler(struct pipe_ctx *pipe_ctx) | |||
2026 | bool per_pixel_alpha = | 2012 | bool per_pixel_alpha = |
2027 | pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe; | 2013 | pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe; |
2028 | 2014 | ||
2029 | /* TODO: proper fix once fpga works */ | ||
2030 | |||
2031 | pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha; | 2015 | pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha; |
2032 | pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP; | 2016 | pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP; |
2033 | /* scaler configuration */ | 2017 | /* scaler configuration */ |
@@ -2035,7 +2019,7 @@ static void update_scaler(struct pipe_ctx *pipe_ctx) | |||
2035 | pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data); | 2019 | pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data); |
2036 | } | 2020 | } |
2037 | 2021 | ||
2038 | static void update_dchubp_dpp( | 2022 | void update_dchubp_dpp( |
2039 | struct dc *dc, | 2023 | struct dc *dc, |
2040 | struct pipe_ctx *pipe_ctx, | 2024 | struct pipe_ctx *pipe_ctx, |
2041 | struct dc_state *context) | 2025 | struct dc_state *context) |
@@ -2052,16 +2036,22 @@ static void update_dchubp_dpp( | |||
2052 | */ | 2036 | */ |
2053 | if (plane_state->update_flags.bits.full_update) { | 2037 | if (plane_state->update_flags.bits.full_update) { |
2054 | bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <= | 2038 | bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <= |
2055 | dc->res_pool->dccg->clks.dispclk_khz / 2; | 2039 | dc->res_pool->clk_mgr->clks.dispclk_khz / 2; |
2056 | 2040 | ||
2057 | dpp->funcs->dpp_dppclk_control( | 2041 | dpp->funcs->dpp_dppclk_control( |
2058 | dpp, | 2042 | dpp, |
2059 | should_divided_by_2, | 2043 | should_divided_by_2, |
2060 | true); | 2044 | true); |
2061 | 2045 | ||
2062 | dc->res_pool->dccg->clks.dppclk_khz = should_divided_by_2 ? | 2046 | if (dc->res_pool->dccg) |
2063 | dc->res_pool->dccg->clks.dispclk_khz / 2 : | 2047 | dc->res_pool->dccg->funcs->update_dpp_dto( |
2064 | dc->res_pool->dccg->clks.dispclk_khz; | 2048 | dc->res_pool->dccg, |
2049 | dpp->inst, | ||
2050 | pipe_ctx->plane_res.bw.calc.dppclk_khz); | ||
2051 | else | ||
2052 | dc->res_pool->clk_mgr->clks.dppclk_khz = should_divided_by_2 ? | ||
2053 | dc->res_pool->clk_mgr->clks.dispclk_khz / 2 : | ||
2054 | dc->res_pool->clk_mgr->clks.dispclk_khz; | ||
2065 | } | 2055 | } |
2066 | 2056 | ||
2067 | /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG | 2057 | /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG |
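With a DCCG present, each DPP now gets its own DTO programmed from the per-pipe calculated dppclk (plane_res.bw.calc.dppclk_khz) rather than one global choice; the legacy fallback keeps the old model where dppclk is either dispclk or dispclk / 2:

	clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
			clk_mgr->clks.dispclk_khz / 2 : clk_mgr->clks.dispclk_khz;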
@@ -2182,7 +2172,7 @@ static void dcn10_blank_pixel_data( | |||
2182 | } | 2172 | } |
2183 | } | 2173 | } |
2184 | 2174 | ||
2185 | static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx) | 2175 | void set_hdr_multiplier(struct pipe_ctx *pipe_ctx) |
2186 | { | 2176 | { |
2187 | struct fixed31_32 multiplier = dc_fixpt_from_fraction( | 2177 | struct fixed31_32 multiplier = dc_fixpt_from_fraction( |
2188 | pipe_ctx->plane_state->sdr_white_level, 80); | 2178 | pipe_ctx->plane_state->sdr_white_level, 80); |
@@ -2257,47 +2247,7 @@ static void program_all_pipe_in_tree( | |||
2257 | } | 2247 | } |
2258 | } | 2248 | } |
2259 | 2249 | ||
2260 | static void dcn10_pplib_apply_display_requirements( | 2250 | struct pipe_ctx *find_top_pipe_for_stream( |
2261 | struct dc *dc, | ||
2262 | struct dc_state *context) | ||
2263 | { | ||
2264 | struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; | ||
2265 | |||
2266 | pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz; | ||
2267 | pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz; | ||
2268 | pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz; | ||
2269 | pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz; | ||
2270 | pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz; | ||
2271 | pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz; | ||
2272 | dce110_fill_display_configs(context, pp_display_cfg); | ||
2273 | |||
2274 | if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof( | ||
2275 | struct dm_pp_display_configuration)) != 0) | ||
2276 | dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); | ||
2277 | |||
2278 | dc->prev_display_config = *pp_display_cfg; | ||
2279 | } | ||
2280 | |||
2281 | static void optimize_shared_resources(struct dc *dc) | ||
2282 | { | ||
2283 | if (dc->current_state->stream_count == 0) { | ||
2284 | /* S0i2 message */ | ||
2285 | dcn10_pplib_apply_display_requirements(dc, dc->current_state); | ||
2286 | } | ||
2287 | |||
2288 | if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) | ||
2289 | dcn_bw_notify_pplib_of_wm_ranges(dc); | ||
2290 | } | ||
2291 | |||
2292 | static void ready_shared_resources(struct dc *dc, struct dc_state *context) | ||
2293 | { | ||
2294 | /* S0i2 message */ | ||
2295 | if (dc->current_state->stream_count == 0 && | ||
2296 | context->stream_count != 0) | ||
2297 | dcn10_pplib_apply_display_requirements(dc, context); | ||
2298 | } | ||
2299 | |||
2300 | static struct pipe_ctx *find_top_pipe_for_stream( | ||
2301 | struct dc *dc, | 2251 | struct dc *dc, |
2302 | struct dc_state *context, | 2252 | struct dc_state *context, |
2303 | const struct dc_stream_state *stream) | 2253 | const struct dc_stream_state *stream) |
@@ -2398,10 +2348,9 @@ static void dcn10_apply_ctx_for_surface( | |||
2398 | hubbub1_wm_change_req_wa(dc->res_pool->hubbub); | 2348 | hubbub1_wm_change_req_wa(dc->res_pool->hubbub); |
2399 | } | 2349 | } |
2400 | 2350 | ||
2401 | static void dcn10_set_bandwidth( | 2351 | static void dcn10_prepare_bandwidth( |
2402 | struct dc *dc, | 2352 | struct dc *dc, |
2403 | struct dc_state *context, | 2353 | struct dc_state *context) |
2404 | bool safe_to_lower) | ||
2405 | { | 2354 | { |
2406 | if (dc->debug.sanity_checks) | 2355 | if (dc->debug.sanity_checks) |
2407 | dcn10_verify_allow_pstate_change_high(dc); | 2356 | dcn10_verify_allow_pstate_change_high(dc); |
@@ -2410,12 +2359,39 @@ static void dcn10_set_bandwidth( | |||
2410 | if (context->stream_count == 0) | 2359 | if (context->stream_count == 0) |
2411 | context->bw.dcn.clk.phyclk_khz = 0; | 2360 | context->bw.dcn.clk.phyclk_khz = 0; |
2412 | 2361 | ||
2413 | dc->res_pool->dccg->funcs->update_clocks( | 2362 | dc->res_pool->clk_mgr->funcs->update_clocks( |
2414 | dc->res_pool->dccg, | 2363 | dc->res_pool->clk_mgr, |
2415 | &context->bw.dcn.clk, | 2364 | context, |
2416 | safe_to_lower); | 2365 | false); |
2366 | } | ||
2417 | 2367 | ||
2418 | dcn10_pplib_apply_display_requirements(dc, context); | 2368 | hubbub1_program_watermarks(dc->res_pool->hubbub, |
2369 | &context->bw.dcn.watermarks, | ||
2370 | dc->res_pool->ref_clock_inKhz / 1000, | ||
2371 | true); | ||
2372 | |||
2373 | if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) | ||
2374 | dcn_bw_notify_pplib_of_wm_ranges(dc); | ||
2375 | |||
2376 | if (dc->debug.sanity_checks) | ||
2377 | dcn10_verify_allow_pstate_change_high(dc); | ||
2378 | } | ||
2379 | |||
2380 | static void dcn10_optimize_bandwidth( | ||
2381 | struct dc *dc, | ||
2382 | struct dc_state *context) | ||
2383 | { | ||
2384 | if (dc->debug.sanity_checks) | ||
2385 | dcn10_verify_allow_pstate_change_high(dc); | ||
2386 | |||
2387 | if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { | ||
2388 | if (context->stream_count == 0) | ||
2389 | context->bw.dcn.clk.phyclk_khz = 0; | ||
2390 | |||
2391 | dc->res_pool->clk_mgr->funcs->update_clocks( | ||
2392 | dc->res_pool->clk_mgr, | ||
2393 | context, | ||
2394 | true); | ||
2419 | } | 2395 | } |
2420 | 2396 | ||
2421 | hubbub1_program_watermarks(dc->res_pool->hubbub, | 2397 | hubbub1_program_watermarks(dc->res_pool->hubbub, |
@@ -2423,6 +2399,9 @@ static void dcn10_set_bandwidth( | |||
2423 | dc->res_pool->ref_clock_inKhz / 1000, | 2399 | dc->res_pool->ref_clock_inKhz / 1000, |
2424 | true); | 2400 | true); |
2425 | 2401 | ||
2402 | if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) | ||
2403 | dcn_bw_notify_pplib_of_wm_ranges(dc); | ||
2404 | |||
2426 | if (dc->debug.sanity_checks) | 2405 | if (dc->debug.sanity_checks) |
2427 | dcn10_verify_allow_pstate_change_high(dc); | 2406 | dcn10_verify_allow_pstate_change_high(dc); |
2428 | } | 2407 | } |
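The old set_bandwidth(dc, context, safe_to_lower) entry point is split so the core can bracket mode programming. Assumed calling order (a sketch; program_pipes is a hypothetical placeholder for the actual programming step):

	dc->hwss.prepare_bandwidth(dc, context);  /* update_clocks(..., false): raise only */
	program_pipes(dc, context);               /* program the new state */
	dc->hwss.optimize_bandwidth(dc, context); /* update_clocks(..., true): safe to lower */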
@@ -2694,7 +2673,6 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx) | |||
2694 | 2673 | ||
2695 | static const struct hw_sequencer_funcs dcn10_funcs = { | 2674 | static const struct hw_sequencer_funcs dcn10_funcs = { |
2696 | .program_gamut_remap = program_gamut_remap, | 2675 | .program_gamut_remap = program_gamut_remap, |
2697 | .program_csc_matrix = program_csc_matrix, | ||
2698 | .init_hw = dcn10_init_hw, | 2676 | .init_hw = dcn10_init_hw, |
2699 | .apply_ctx_to_hw = dce110_apply_ctx_to_hw, | 2677 | .apply_ctx_to_hw = dce110_apply_ctx_to_hw, |
2700 | .apply_ctx_for_surface = dcn10_apply_ctx_for_surface, | 2678 | .apply_ctx_for_surface = dcn10_apply_ctx_for_surface, |
@@ -2721,7 +2699,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = { | |||
2721 | .disable_plane = dcn10_disable_plane, | 2699 | .disable_plane = dcn10_disable_plane, |
2722 | .blank_pixel_data = dcn10_blank_pixel_data, | 2700 | .blank_pixel_data = dcn10_blank_pixel_data, |
2723 | .pipe_control_lock = dcn10_pipe_control_lock, | 2701 | .pipe_control_lock = dcn10_pipe_control_lock, |
2724 | .set_bandwidth = dcn10_set_bandwidth, | 2702 | .prepare_bandwidth = dcn10_prepare_bandwidth, |
2703 | .optimize_bandwidth = dcn10_optimize_bandwidth, | ||
2725 | .reset_hw_ctx_wrap = reset_hw_ctx_wrap, | 2704 | .reset_hw_ctx_wrap = reset_hw_ctx_wrap, |
2726 | .enable_stream_timing = dcn10_enable_stream_timing, | 2705 | .enable_stream_timing = dcn10_enable_stream_timing, |
2727 | .set_drr = set_drr, | 2706 | .set_drr = set_drr, |
@@ -2732,10 +2711,6 @@ static const struct hw_sequencer_funcs dcn10_funcs = { | |||
2732 | .log_hw_state = dcn10_log_hw_state, | 2711 | .log_hw_state = dcn10_log_hw_state, |
2733 | .get_hw_state = dcn10_get_hw_state, | 2712 | .get_hw_state = dcn10_get_hw_state, |
2734 | .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, | 2713 | .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, |
2735 | .ready_shared_resources = ready_shared_resources, | ||
2736 | .optimize_shared_resources = optimize_shared_resources, | ||
2737 | .pplib_apply_display_requirements = | ||
2738 | dcn10_pplib_apply_display_requirements, | ||
2739 | .edp_backlight_control = hwss_edp_backlight_control, | 2714 | .edp_backlight_control = hwss_edp_backlight_control, |
2740 | .edp_power_control = hwss_edp_power_control, | 2715 | .edp_power_control = hwss_edp_power_control, |
2741 | .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, | 2716 | .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready, |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index 84d461e0ed3e..5e5610c9e600 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | |||
@@ -51,4 +51,24 @@ void dcn10_get_hw_state( | |||
51 | char *pBuf, unsigned int bufSize, | 51 | char *pBuf, unsigned int bufSize, |
52 | unsigned int mask); | 52 | unsigned int mask); |
53 | 53 | ||
54 | bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx); | ||
55 | |||
56 | bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx); | ||
57 | |||
58 | bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx); | ||
59 | |||
60 | void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp); | ||
61 | |||
62 | void set_hdr_multiplier(struct pipe_ctx *pipe_ctx); | ||
63 | |||
64 | void update_dchubp_dpp( | ||
65 | struct dc *dc, | ||
66 | struct pipe_ctx *pipe_ctx, | ||
67 | struct dc_state *context); | ||
68 | |||
69 | struct pipe_ctx *find_top_pipe_for_stream( | ||
70 | struct dc *dc, | ||
71 | struct dc_state *context, | ||
72 | const struct dc_stream_state *stream); | ||
73 | |||
54 | #endif /* __DC_HWSS_DCN10_H__ */ | 74 | #endif /* __DC_HWSS_DCN10_H__ */ |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index ba6a8686062f..477ab9222216 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | |||
@@ -589,7 +589,7 @@ static bool dcn10_link_encoder_validate_hdmi_output( | |||
589 | return false; | 589 | return false; |
590 | 590 | ||
591 | /* DCE11 HW does not support 420 */ | 591 | /* DCE11 HW does not support 420 */ |
592 | if (!enc10->base.features.ycbcr420_supported && | 592 | if (!enc10->base.features.hdmi_ycbcr420_supported && |
593 | crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) | 593 | crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) |
594 | return false; | 594 | return false; |
595 | 595 | ||
@@ -606,8 +606,10 @@ bool dcn10_link_encoder_validate_dp_output( | |||
606 | const struct dcn10_link_encoder *enc10, | 606 | const struct dcn10_link_encoder *enc10, |
607 | const struct dc_crtc_timing *crtc_timing) | 607 | const struct dc_crtc_timing *crtc_timing) |
608 | { | 608 | { |
609 | if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) | 609 | if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) { |
610 | return false; | 610 | if (!enc10->base.features.dp_ycbcr420_supported) |
611 | return false; | ||
612 | } | ||
611 | 613 | ||
612 | return true; | 614 | return true; |
613 | } | 615 | } |
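The former single ycbcr420_supported capability is split by output type; the DCN1.0 resource code below advertises 4:2:0 for HDMI only:

	.hdmi_ycbcr420_supported = true,
	.dp_ycbcr420_supported = false,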
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 54626682bab2..7d1f66797cb3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | |||
@@ -87,9 +87,8 @@ static void optc1_disable_stereo(struct timing_generator *optc) | |||
87 | REG_SET(OTG_STEREO_CONTROL, 0, | 87 | REG_SET(OTG_STEREO_CONTROL, 0, |
88 | OTG_STEREO_EN, 0); | 88 | OTG_STEREO_EN, 0); |
89 | 89 | ||
90 | REG_SET_3(OTG_3D_STRUCTURE_CONTROL, 0, | 90 | REG_SET_2(OTG_3D_STRUCTURE_CONTROL, 0, |
91 | OTG_3D_STRUCTURE_EN, 0, | 91 | OTG_3D_STRUCTURE_EN, 0, |
92 | OTG_3D_STRUCTURE_V_UPDATE_MODE, 0, | ||
93 | OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0); | 92 | OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0); |
94 | } | 93 | } |
95 | 94 | ||
@@ -274,10 +273,12 @@ void optc1_program_timing( | |||
274 | * program the reg for interrupt position. | 273 | * program the reg for interrupt position. |
275 | */ | 274 | */ |
276 | vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1; | 275 | vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1; |
277 | if (vertical_line_start < 0) { | 276 | v_fp2 = 0; |
278 | ASSERT(0); | 277 | if (vertical_line_start < 0) |
278 | v_fp2 = -vertical_line_start; | ||
279 | if (vertical_line_start < 0) | ||
279 | vertical_line_start = 0; | 280 | vertical_line_start = 0; |
280 | } | 281 | |
281 | REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0, | 282 | REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0, |
282 | OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start); | 283 | OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start); |
283 | 284 | ||
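Worked example of the fix (illustrative numbers): with asic_blank_end = 100 and vstartup_start = 105, vertical_line_start = 100 - 105 + 1 = -4, so v_fp2 becomes 4 and the interrupt line start is clamped to 0. The computation removed in the next hunk instead assigned the boolean result of vstartup_start > asic_blank_end to v_fp2, i.e. 0 or 1 regardless of the actual distance.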
@@ -296,9 +297,6 @@ void optc1_program_timing( | |||
296 | if (patched_crtc_timing.flags.INTERLACE == 1) | 297 | if (patched_crtc_timing.flags.INTERLACE == 1) |
297 | field_num = 1; | 298 | field_num = 1; |
298 | } | 299 | } |
299 | v_fp2 = 0; | ||
300 | if (optc->dlg_otg_param.vstartup_start > asic_blank_end) | ||
301 | v_fp2 = optc->dlg_otg_param.vstartup_start > asic_blank_end; | ||
302 | 300 | ||
303 | /* Interlace */ | 301 | /* Interlace */ |
304 | if (patched_crtc_timing.flags.INTERLACE == 1) { | 302 | if (patched_crtc_timing.flags.INTERLACE == 1) { |
@@ -1155,9 +1153,8 @@ static void optc1_enable_stereo(struct timing_generator *optc, | |||
1155 | OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, 1); | 1153 | OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, 1); |
1156 | 1154 | ||
1157 | if (flags->PROGRAM_STEREO) | 1155 | if (flags->PROGRAM_STEREO) |
1158 | REG_UPDATE_3(OTG_3D_STRUCTURE_CONTROL, | 1156 | REG_UPDATE_2(OTG_3D_STRUCTURE_CONTROL, |
1159 | OTG_3D_STRUCTURE_EN, flags->FRAME_PACKED, | 1157 | OTG_3D_STRUCTURE_EN, flags->FRAME_PACKED, |
1160 | OTG_3D_STRUCTURE_V_UPDATE_MODE, flags->FRAME_PACKED, | ||
1161 | OTG_3D_STRUCTURE_STEREO_SEL_OVR, flags->FRAME_PACKED); | 1158 | OTG_3D_STRUCTURE_STEREO_SEL_OVR, flags->FRAME_PACKED); |
1162 | 1159 | ||
1163 | } | 1160 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index a71453a15ae3..47dbe4bb294a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | |||
@@ -28,23 +28,23 @@ | |||
28 | 28 | ||
29 | #include "resource.h" | 29 | #include "resource.h" |
30 | #include "include/irq_service_interface.h" | 30 | #include "include/irq_service_interface.h" |
31 | #include "dcn10/dcn10_resource.h" | 31 | #include "dcn10_resource.h" |
32 | 32 | ||
33 | #include "dcn10/dcn10_ipp.h" | 33 | #include "dcn10_ipp.h" |
34 | #include "dcn10/dcn10_mpc.h" | 34 | #include "dcn10_mpc.h" |
35 | #include "irq/dcn10/irq_service_dcn10.h" | 35 | #include "irq/dcn10/irq_service_dcn10.h" |
36 | #include "dcn10/dcn10_dpp.h" | 36 | #include "dcn10_dpp.h" |
37 | #include "dcn10_optc.h" | 37 | #include "dcn10_optc.h" |
38 | #include "dcn10/dcn10_hw_sequencer.h" | 38 | #include "dcn10_hw_sequencer.h" |
39 | #include "dce110/dce110_hw_sequencer.h" | 39 | #include "dce110/dce110_hw_sequencer.h" |
40 | #include "dcn10/dcn10_opp.h" | 40 | #include "dcn10_opp.h" |
41 | #include "dcn10/dcn10_link_encoder.h" | 41 | #include "dcn10_link_encoder.h" |
42 | #include "dcn10/dcn10_stream_encoder.h" | 42 | #include "dcn10_stream_encoder.h" |
43 | #include "dce/dce_clocks.h" | 43 | #include "dcn10_clk_mgr.h" |
44 | #include "dce/dce_clock_source.h" | 44 | #include "dce/dce_clock_source.h" |
45 | #include "dce/dce_audio.h" | 45 | #include "dce/dce_audio.h" |
46 | #include "dce/dce_hwseq.h" | 46 | #include "dce/dce_hwseq.h" |
47 | #include "../virtual/virtual_stream_encoder.h" | 47 | #include "virtual/virtual_stream_encoder.h" |
48 | #include "dce110/dce110_resource.h" | 48 | #include "dce110/dce110_resource.h" |
49 | #include "dce112/dce112_resource.h" | 49 | #include "dce112/dce112_resource.h" |
50 | #include "dcn10_hubp.h" | 50 | #include "dcn10_hubp.h" |
@@ -438,6 +438,7 @@ static const struct dcn_optc_mask tg_mask = { | |||
438 | 438 | ||
439 | 439 | ||
440 | static const struct bios_registers bios_regs = { | 440 | static const struct bios_registers bios_regs = { |
441 | NBIO_SR(BIOS_SCRATCH_0), | ||
441 | NBIO_SR(BIOS_SCRATCH_3), | 442 | NBIO_SR(BIOS_SCRATCH_3), |
442 | NBIO_SR(BIOS_SCRATCH_6) | 443 | NBIO_SR(BIOS_SCRATCH_6) |
443 | }; | 444 | }; |
@@ -719,7 +720,8 @@ static struct timing_generator *dcn10_timing_generator_create( | |||
719 | static const struct encoder_feature_support link_enc_feature = { | 720 | static const struct encoder_feature_support link_enc_feature = { |
720 | .max_hdmi_deep_color = COLOR_DEPTH_121212, | 721 | .max_hdmi_deep_color = COLOR_DEPTH_121212, |
721 | .max_hdmi_pixel_clock = 600000, | 722 | .max_hdmi_pixel_clock = 600000, |
722 | .ycbcr420_supported = true, | 723 | .hdmi_ycbcr420_supported = true, |
724 | .dp_ycbcr420_supported = false, | ||
723 | .flags.bits.IS_HBR2_CAPABLE = true, | 725 | .flags.bits.IS_HBR2_CAPABLE = true, |
724 | .flags.bits.IS_HBR3_CAPABLE = true, | 726 | .flags.bits.IS_HBR3_CAPABLE = true, |
725 | .flags.bits.IS_TPS3_CAPABLE = true, | 727 | .flags.bits.IS_TPS3_CAPABLE = true, |
@@ -949,8 +951,8 @@ static void destruct(struct dcn10_resource_pool *pool) | |||
949 | if (pool->base.dmcu != NULL) | 951 | if (pool->base.dmcu != NULL) |
950 | dce_dmcu_destroy(&pool->base.dmcu); | 952 | dce_dmcu_destroy(&pool->base.dmcu); |
951 | 953 | ||
952 | if (pool->base.dccg != NULL) | 954 | if (pool->base.clk_mgr != NULL) |
953 | dce_dccg_destroy(&pool->base.dccg); | 955 | dce_clk_mgr_destroy(&pool->base.clk_mgr); |
954 | 956 | ||
955 | kfree(pool->base.pp_smu); | 957 | kfree(pool->base.pp_smu); |
956 | } | 958 | } |
@@ -1276,8 +1278,8 @@ static bool construct( | |||
1276 | } | 1278 | } |
1277 | } | 1279 | } |
1278 | 1280 | ||
1279 | pool->base.dccg = dcn1_dccg_create(ctx); | 1281 | pool->base.clk_mgr = dcn1_clk_mgr_create(ctx); |
1280 | if (pool->base.dccg == NULL) { | 1282 | if (pool->base.clk_mgr == NULL) { |
1281 | dm_error("DC: failed to create display clock!\n"); | 1283 | dm_error("DC: failed to create display clock!\n"); |
1282 | BREAK_TO_DEBUGGER(); | 1284 | BREAK_TO_DEBUGGER(); |
1283 | goto fail; | 1285 | goto fail; |
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index f2ea8452d48f..beb08fd12b1d 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | |||
@@ -55,10 +55,10 @@ struct pp_smu { | |||
55 | 55 | ||
56 | struct pp_smu_wm_set_range { | 56 | struct pp_smu_wm_set_range { |
57 | unsigned int wm_inst; | 57 | unsigned int wm_inst; |
58 | uint32_t min_fill_clk_khz; | 58 | uint32_t min_fill_clk_mhz; |
59 | uint32_t max_fill_clk_khz; | 59 | uint32_t max_fill_clk_mhz; |
60 | uint32_t min_drain_clk_khz; | 60 | uint32_t min_drain_clk_mhz; |
61 | uint32_t max_drain_clk_khz; | 61 | uint32_t max_drain_clk_mhz; |
62 | }; | 62 | }; |
63 | 63 | ||
64 | #define MAX_WATERMARK_SETS 4 | 64 | #define MAX_WATERMARK_SETS 4 |
@@ -77,15 +77,15 @@ struct pp_smu_display_requirement_rv { | |||
77 | */ | 77 | */ |
78 | unsigned int display_count; | 78 | unsigned int display_count; |
79 | 79 | ||
80 | /* PPSMC_MSG_SetHardMinFclkByFreq: khz | 80 | /* PPSMC_MSG_SetHardMinFclkByFreq: mhz |
81 | * FCLK will vary with DPM, but never below requested hard min | 81 | * FCLK will vary with DPM, but never below requested hard min |
82 | */ | 82 | */ |
83 | unsigned int hard_min_fclk_khz; | 83 | unsigned int hard_min_fclk_mhz; |
84 | 84 | ||
85 | /* PPSMC_MSG_SetHardMinDcefclkByFreq: khz | 85 | /* PPSMC_MSG_SetHardMinDcefclkByFreq: mhz |
86 | * fixed clock at requested freq, either from FCH bypass or DFS | 86 | * fixed clock at requested freq, either from FCH bypass or DFS |
87 | */ | 87 | */ |
88 | unsigned int hard_min_dcefclk_khz; | 88 | unsigned int hard_min_dcefclk_mhz; |
89 | 89 | ||
90 | /* PPSMC_MSG_SetMinDeepSleepDcefclk: mhz | 90 | /* PPSMC_MSG_SetMinDeepSleepDcefclk: mhz |
91 | * when DF is in cstate, dcf clock is further divided down | 91 | * when DF is in cstate, dcf clock is further divided down |
@@ -103,13 +103,19 @@ struct pp_smu_funcs_rv { | |||
103 | void (*set_display_count)(struct pp_smu *pp, int count); | 103 | void (*set_display_count)(struct pp_smu *pp, int count); |
104 | 104 | ||
105 | /* which SMU message? are reader and writer WM separate SMU msg? */ | 105 | /* which SMU message? are reader and writer WM separate SMU msg? */ |
106 | /* | ||
107 | * PPSMC_MSG_SetDriverDramAddrHigh | ||
108 | * PPSMC_MSG_SetDriverDramAddrLow | ||
109 | * PPSMC_MSG_TransferTableDram2Smu | ||
110 | * | ||
111 | */ | ||
106 | void (*set_wm_ranges)(struct pp_smu *pp, | 112 | void (*set_wm_ranges)(struct pp_smu *pp, |
107 | struct pp_smu_wm_range_sets *ranges); | 113 | struct pp_smu_wm_range_sets *ranges); |
108 | 114 | ||
109 | /* PPSMC_MSG_SetHardMinDcfclkByFreq | 115 | /* PPSMC_MSG_SetHardMinDcfclkByFreq |
110 | * fixed clock at requested freq, either from FCH bypass or DFS | 116 | * fixed clock at requested freq, either from FCH bypass or DFS |
111 | */ | 117 | */ |
112 | void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int khz); | 118 | void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int mhz); |
113 | 119 | ||
114 | /* PPSMC_MSG_SetMinDeepSleepDcfclk | 120 | /* PPSMC_MSG_SetMinDeepSleepDcfclk |
115 | * when DF is in cstate, dcf clock is further divided down | 121 | * when DF is in cstate, dcf clock is further divided down |
@@ -120,12 +126,12 @@ struct pp_smu_funcs_rv { | |||
120 | /* PPSMC_MSG_SetHardMinFclkByFreq | 126 | /* PPSMC_MSG_SetHardMinFclkByFreq |
121 | * FCLK will vary with DPM, but never below requested hard min | 127 | * FCLK will vary with DPM, but never below requested hard min |
122 | */ | 128 | */ |
123 | void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int khz); | 129 | void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int mhz); |
124 | 130 | ||
125 | /* PPSMC_MSG_SetHardMinSocclkByFreq | 131 | /* PPSMC_MSG_SetHardMinSocclkByFreq |
126 | * Needed for DWB support | 132 | * Needed for DWB support |
127 | */ | 133 | */ |
128 | void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int khz); | 134 | void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int mhz); |
129 | 135 | ||
130 | /* PME w/a */ | 136 | /* PME w/a */ |
131 | void (*set_pme_wa_enable)(struct pp_smu *pp); | 137 | void (*set_pme_wa_enable)(struct pp_smu *pp); |
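With this hunk, every clock crossing the DM/SMU boundary is expressed in MHz instead of kHz, both in the watermark range struct and in the hard-min setters. A minimal sketch of a caller under the new convention follows; the pp_smu_wm_range_sets container and its reader_wm_sets field names are assumed from the rest of this header, so treat it as illustrative rather than the driver's actual code.

/* Illustrative sketch only: program one reader watermark range and a
 * hard minimum FCLK, with all clocks in MHz per the renamed fields. */
static void example_program_rv_clocks(struct pp_smu *pp,
                struct pp_smu_funcs_rv *funcs)
{
        struct pp_smu_wm_range_sets ranges = {0};

        ranges.num_reader_wm_sets = 1;
        ranges.reader_wm_sets[0].wm_inst = 0;
        ranges.reader_wm_sets[0].min_fill_clk_mhz = 300;   /* was 300000 kHz */
        ranges.reader_wm_sets[0].max_fill_clk_mhz = 1200;  /* was 1200000 kHz */

        funcs->set_wm_ranges(pp, &ranges);
        funcs->set_hard_min_fclk_by_freq(pp, 800);  /* 800 MHz, not 800000 */
}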
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h index 2b83f922ac02..1af8c777b3ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h | |||
@@ -208,22 +208,20 @@ struct dm_bl_data_point { | |||
208 | /* Brightness level as effective value in range 0-255, | 208 | /* Brightness level as effective value in range 0-255, |
209 | * corresponding to above percentage | 209 | * corresponding to above percentage |
210 | */ | 210 | */ |
211 | uint8_t signalLevel; | 211 | uint8_t signal_level; |
212 | }; | 212 | }; |
213 | 213 | ||
214 | /* Total size of the structure should not exceed 256 bytes */ | 214 | /* Total size of the structure should not exceed 256 bytes */ |
215 | struct dm_acpi_atif_backlight_caps { | 215 | struct dm_acpi_atif_backlight_caps { |
216 | |||
217 | |||
218 | uint16_t size; /* Bytes 0-1 (2 bytes) */ | 216 | uint16_t size; /* Bytes 0-1 (2 bytes) */ |
219 | uint16_t flags; /* Bytes 2-3 (2 bytes) */ | 217 | uint16_t flags; /* Bytes 2-3 (2 bytes) */ |
220 | uint8_t errorCode; /* Byte 4 */ | 218 | uint8_t error_code; /* Byte 4 */ |
221 | uint8_t acLevelPercentage; /* Byte 5 */ | 219 | uint8_t ac_level_percentage; /* Byte 5 */ |
222 | uint8_t dcLevelPercentage; /* Byte 6 */ | 220 | uint8_t dc_level_percentage; /* Byte 6 */ |
223 | uint8_t minInputSignal; /* Byte 7 */ | 221 | uint8_t min_input_signal; /* Byte 7 */ |
224 | uint8_t maxInputSignal; /* Byte 8 */ | 222 | uint8_t max_input_signal; /* Byte 8 */ |
225 | uint8_t numOfDataPoints; /* Byte 9 */ | 223 | uint8_t num_data_points; /* Byte 9 */ |
226 | struct dm_bl_data_point dataPoints[99]; /* Bytes 10-207 (198 bytes)*/ | 224 | struct dm_bl_data_point data_points[99]; /* Bytes 10-207 (198 bytes)*/ |
227 | }; | 225 | }; |
228 | 226 | ||
229 | enum dm_acpi_display_type { | 227 | enum dm_acpi_display_type { |
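The layout comments pin this structure at 208 bytes (10 bytes of scalar fields plus 99 two-byte data points), safely under the stated 256-byte ceiling. A hedged compile-time guard for that contract could look like the sketch below, assuming both dm_bl_data_point members are single bytes as the layout comments imply.

#include <linux/build_bug.h>

/* Sketch: enforce the "must not exceed 256 bytes" contract at build
 * time. 10 scalar bytes + 99 * sizeof(struct dm_bl_data_point) = 208. */
static inline void example_check_atif_caps_layout(void)
{
        BUILD_BUG_ON(sizeof(struct dm_bl_data_point) != 2);
        BUILD_BUG_ON(sizeof(struct dm_acpi_atif_backlight_caps) > 256);
}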
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index cbafce649e33..5dd04520ceca 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | |||
@@ -113,7 +113,8 @@ struct _vcs_dpi_soc_bounding_box_st { | |||
113 | int use_urgent_burst_bw; | 113 | int use_urgent_burst_bw; |
114 | double max_hscl_ratio; | 114 | double max_hscl_ratio; |
115 | double max_vscl_ratio; | 115 | double max_vscl_ratio; |
116 | struct _vcs_dpi_voltage_scaling_st clock_limits[7]; | 116 | unsigned int num_states; |
117 | struct _vcs_dpi_voltage_scaling_st clock_limits[8]; | ||
117 | }; | 118 | }; |
118 | 119 | ||
119 | struct _vcs_dpi_ip_params_st { | 120 | struct _vcs_dpi_ip_params_st { |
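Adding num_states alongside the enlarged clock_limits array lets consumers walk only the populated voltage states instead of hardcoding seven entries. A hedged sketch of such a walk; the dcfclk_mhz member is assumed from the voltage-scaling struct in this file.

/* Illustrative: return the index of the first voltage state whose
 * DCFCLK limit satisfies the requirement, or -1 if none does. */
static int example_find_min_state(
                const struct _vcs_dpi_soc_bounding_box_st *soc,
                double required_dcfclk_mhz)
{
        unsigned int i;

        for (i = 0; i < soc->num_states; i++)
                if (soc->clock_limits[i].dcfclk_mhz >= required_dcfclk_mhz)
                        return i;
        return -1;
}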
diff --git a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h index 39ee8eba3c31..d1656c9d50df 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h +++ b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h | |||
@@ -126,7 +126,7 @@ static inline struct bw_fixed bw_div(const struct bw_fixed arg1, const struct bw | |||
126 | static inline struct bw_fixed bw_mod(const struct bw_fixed arg1, const struct bw_fixed arg2) | 126 | static inline struct bw_fixed bw_mod(const struct bw_fixed arg1, const struct bw_fixed arg2) |
127 | { | 127 | { |
128 | struct bw_fixed res; | 128 | struct bw_fixed res; |
129 | div64_u64_rem(arg1.value, arg2.value, &res.value); | 129 | div64_u64_rem(arg1.value, arg2.value, (uint64_t *)&res.value); |
130 | return res; | 130 | return res; |
131 | } | 131 | } |
132 | 132 | ||
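The new cast is purely about types: bw_fixed.value is a signed 64-bit raw value, while div64_u64_rem() writes its remainder through a u64 pointer. A small standalone illustration of the helper being relied on:

#include <linux/math64.h>

/* div64_u64_rem() returns the quotient and stores the remainder via
 * the third argument; bw_mod() above keeps only the remainder. */
static u64 example_mod_u64(u64 dividend, u64 divisor)
{
        u64 rem;

        div64_u64_rem(dividend, divisor, &rem);
        return rem;     /* example_mod_u64(10, 3) == 1 */
}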
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index c1976c175b57..e3ee96afa60e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h | |||
@@ -82,7 +82,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option); | |||
82 | 82 | ||
83 | void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable); | 83 | void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable); |
84 | /********** DAL Core*********************/ | 84 | /********** DAL Core*********************/ |
85 | #include "display_clock.h" | 85 | #include "hw/clk_mgr.h" |
86 | #include "transform.h" | 86 | #include "transform.h" |
87 | #include "dpp.h" | 87 | #include "dpp.h" |
88 | 88 | ||
@@ -169,6 +169,7 @@ struct resource_pool { | |||
169 | unsigned int audio_count; | 169 | unsigned int audio_count; |
170 | struct audio_support audio_support; | 170 | struct audio_support audio_support; |
171 | 171 | ||
172 | struct clk_mgr *clk_mgr; | ||
172 | struct dccg *dccg; | 173 | struct dccg *dccg; |
173 | struct irq_service *irqs; | 174 | struct irq_service *irqs; |
174 | 175 | ||
@@ -287,7 +288,7 @@ struct dc_state { | |||
287 | struct dcn_bw_internal_vars dcn_bw_vars; | 288 | struct dcn_bw_internal_vars dcn_bw_vars; |
288 | #endif | 289 | #endif |
289 | 290 | ||
290 | struct dccg *dis_clk; | 291 | struct clk_mgr *dccg; |
291 | 292 | ||
292 | struct kref refcount; | 293 | struct kref refcount; |
293 | }; | 294 | }; |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h index e688eb9b975c..ece954a40a8e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | |||
@@ -31,8 +31,8 @@ | |||
31 | #define __DCN_CALCS_H__ | 31 | #define __DCN_CALCS_H__ |
32 | 32 | ||
33 | #include "bw_fixed.h" | 33 | #include "bw_fixed.h" |
34 | #include "display_clock.h" | ||
35 | #include "../dml/display_mode_lib.h" | 34 | #include "../dml/display_mode_lib.h" |
35 | #include "hw/clk_mgr.h" | ||
36 | 36 | ||
37 | struct dc; | 37 | struct dc; |
38 | struct dc_state; | 38 | struct dc_state; |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h index a83a48494613..abc961c0906e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h | |||
@@ -47,12 +47,18 @@ struct abm_funcs { | |||
47 | bool (*set_abm_level)(struct abm *abm, unsigned int abm_level); | 47 | bool (*set_abm_level)(struct abm *abm, unsigned int abm_level); |
48 | bool (*set_abm_immediate_disable)(struct abm *abm); | 48 | bool (*set_abm_immediate_disable)(struct abm *abm); |
49 | bool (*init_backlight)(struct abm *abm); | 49 | bool (*init_backlight)(struct abm *abm); |
50 | bool (*set_backlight_level)(struct abm *abm, | 50 | |
51 | unsigned int backlight_level, | 51 | /* backlight_pwm_u16_16 is unsigned 32 bit, |
52 | * 16 bit integer + 16 fractional, where 1.0 is max backlight value. | ||
53 | */ | ||
54 | bool (*set_backlight_level_pwm)(struct abm *abm, | ||
55 | unsigned int backlight_pwm_u16_16, | ||
52 | unsigned int frame_ramp, | 56 | unsigned int frame_ramp, |
53 | unsigned int controller_id, | 57 | unsigned int controller_id, |
54 | bool use_smooth_brightness); | 58 | bool use_smooth_brightness); |
55 | unsigned int (*get_current_backlight_8_bit)(struct abm *abm); | 59 | |
60 | unsigned int (*get_current_backlight)(struct abm *abm); | ||
61 | unsigned int (*get_target_backlight)(struct abm *abm); | ||
56 | }; | 62 | }; |
57 | 63 | ||
58 | #endif | 64 | #endif |
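The renamed hook takes brightness as unsigned 16.16 fixed point, where 0x10000 represents full backlight, replacing the old 8-bit level. A hedged sketch of the conversion a caller would perform; the rounding choice here is an assumption for illustration, not the driver's exact code.

/* Illustrative: map a 0..255 user level onto 0x0..0x10000 (16.16),
 * rounding to nearest so that 255 lands exactly on 1.0. */
static unsigned int example_level_to_pwm_u16_16(unsigned int level_0_255)
{
        return (level_0_255 * 0x10000 + 127) / 255;
}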
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 689faa16c0ae..23a4b18e5fee 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | |||
@@ -23,41 +23,25 @@ | |||
23 | * | 23 | * |
24 | */ | 24 | */ |
25 | 25 | ||
26 | #ifndef __DISPLAY_CLOCK_H__ | 26 | #ifndef __DAL_CLK_MGR_H__ |
27 | #define __DISPLAY_CLOCK_H__ | 27 | #define __DAL_CLK_MGR_H__ |
28 | 28 | ||
29 | #include "dm_services_types.h" | 29 | #include "dm_services_types.h" |
30 | #include "dc.h" | 30 | #include "dc.h" |
31 | 31 | ||
32 | /* Structure containing all state-dependent clocks | 32 | struct clk_mgr { |
33 | * (dependent on "enum clocks_state") */ | ||
34 | struct state_dependent_clocks { | ||
35 | int display_clk_khz; | ||
36 | int pixel_clk_khz; | ||
37 | }; | ||
38 | |||
39 | struct dccg { | ||
40 | struct dc_context *ctx; | 33 | struct dc_context *ctx; |
41 | const struct display_clock_funcs *funcs; | 34 | const struct clk_mgr_funcs *funcs; |
42 | 35 | ||
43 | enum dm_pp_clocks_state max_clks_state; | ||
44 | enum dm_pp_clocks_state cur_min_clks_state; | ||
45 | struct dc_clocks clks; | 36 | struct dc_clocks clks; |
46 | }; | 37 | }; |
47 | 38 | ||
48 | struct display_clock_funcs { | 39 | struct clk_mgr_funcs { |
49 | void (*update_clocks)(struct dccg *dccg, | 40 | void (*update_clocks)(struct clk_mgr *clk_mgr, |
50 | struct dc_clocks *new_clocks, | 41 | struct dc_state *context, |
51 | bool safe_to_lower); | 42 | bool safe_to_lower); |
52 | int (*set_dispclk)(struct dccg *dccg, | ||
53 | int requested_clock_khz); | ||
54 | |||
55 | int (*get_dp_ref_clk_frequency)(struct dccg *dccg); | ||
56 | 43 | ||
57 | bool (*update_dfs_bypass)(struct dccg *dccg, | 44 | int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr); |
58 | struct dc *dc, | ||
59 | struct dc_state *context, | ||
60 | int requested_clock_khz); | ||
61 | }; | 45 | }; |
62 | 46 | ||
63 | #endif /* __DISPLAY_CLOCK_H__ */ | 47 | #endif /* __DAL_CLK_MGR_H__ */ |
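The rename shrinks the clock-manager contract to two hooks: update_clocks() now receives the whole dc_state rather than a bare dc_clocks, and the explicit dispclk and DFS-bypass setters disappear behind it. A hedged sketch of a backend filling this vtable; the context->bw.dcn.clk path and the example_ names are assumptions.

static void example_update_clocks(struct clk_mgr *clk_mgr,
                struct dc_state *context,
                bool safe_to_lower)
{
        struct dc_clocks *new_clocks = &context->bw.dcn.clk; /* assumed field */

        /* Never drop clocks unless the caller says lowering is safe. */
        if (safe_to_lower ||
            new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz)
                clk_mgr->clks = *new_clocks;
}

static int example_get_dp_ref_clk_frequency(struct clk_mgr *clk_mgr)
{
        return 600000;  /* placeholder DP reference clock, in kHz */
}

static const struct clk_mgr_funcs example_clk_mgr_funcs = {
        .update_clocks = example_update_clocks,
        .get_dp_ref_clk_frequency = example_get_dp_ref_clk_frequency,
};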
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h new file mode 100644 index 000000000000..95a56d012626 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | |||
@@ -0,0 +1,44 @@ | |||
1 | /* | ||
2 | * Copyright 2018 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: AMD | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | #ifndef __DAL_DCCG_H__ | ||
27 | #define __DAL_DCCG_H__ | ||
28 | |||
29 | #include "dc_types.h" | ||
30 | |||
31 | struct dccg { | ||
32 | struct dc_context *ctx; | ||
33 | const struct dccg_funcs *funcs; | ||
34 | |||
35 | int ref_dppclk; | ||
36 | }; | ||
37 | |||
38 | struct dccg_funcs { | ||
39 | void (*update_dpp_dto)(struct dccg *dccg, | ||
40 | int dpp_inst, | ||
41 | int req_dppclk); | ||
42 | }; | ||
43 | |||
44 | #endif //__DAL_DCCG_H__ | ||
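What survives under the dccg name is deliberately narrow: a single hook that programs one DPP's DTO against the reference DPP clock the struct tracks. A hedged usage sketch:

/* Illustrative: request a per-DPP clock; the NULL-safe wrapper mirrors
 * how optional hardware blocks are typically driven in DC. */
static void example_request_dppclk(struct dccg *dccg, int dpp_inst,
                int req_dppclk)
{
        if (dccg && dccg->funcs && dccg->funcs->update_dpp_dto)
                dccg->funcs->update_dpp_dto(dccg, dpp_inst, req_dppclk);
}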
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index cf7433ebf91a..da85537a4488 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | |||
@@ -53,6 +53,12 @@ struct curve_points { | |||
53 | uint32_t custom_float_slope; | 53 | uint32_t custom_float_slope; |
54 | }; | 54 | }; |
55 | 55 | ||
56 | struct curve_points3 { | ||
57 | struct curve_points red; | ||
58 | struct curve_points green; | ||
59 | struct curve_points blue; | ||
60 | }; | ||
61 | |||
56 | struct pwl_result_data { | 62 | struct pwl_result_data { |
57 | struct fixed31_32 red; | 63 | struct fixed31_32 red; |
58 | struct fixed31_32 green; | 64 | struct fixed31_32 green; |
@@ -71,9 +77,17 @@ struct pwl_result_data { | |||
71 | uint32_t delta_blue_reg; | 77 | uint32_t delta_blue_reg; |
72 | }; | 78 | }; |
73 | 79 | ||
80 | /* arr_curve_points - regamma regions/segments specification | ||
81 | * arr_points - beginning and end point specified separately (only one on DCE) | ||
82 | * corner_points - beginning and end point for all 3 colors (DCN) | ||
83 | * rgb_resulted - final curve | ||
84 | */ | ||
74 | struct pwl_params { | 85 | struct pwl_params { |
75 | struct gamma_curve arr_curve_points[34]; | 86 | struct gamma_curve arr_curve_points[34]; |
76 | struct curve_points arr_points[2]; | 87 | union { |
88 | struct curve_points arr_points[2]; | ||
89 | struct curve_points3 corner_points[2]; | ||
90 | }; | ||
77 | struct pwl_result_data rgb_resulted[256 + 3]; | 91 | struct pwl_result_data rgb_resulted[256 + 3]; |
78 | uint32_t hw_points_num; | 92 | uint32_t hw_points_num; |
79 | }; | 93 | }; |
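Because arr_points and corner_points overlay the same storage, a pwl_params consumer commits to one view: DCE keeps the two shared begin/end points, while DCN programs them per color channel. A hedged illustration of populating the DCN view:

/* Illustrative: replicate one begin/end pair across R/G/B, as a DCN
 * path might before diverging the per-channel corner points. */
static void example_seed_corner_points(struct pwl_params *params,
                const struct curve_points *begin,
                const struct curve_points *end)
{
        params->corner_points[0].red = *begin;
        params->corner_points[0].green = *begin;
        params->corner_points[0].blue = *begin;

        params->corner_points[1].red = *end;
        params->corner_points[1].green = *end;
        params->corner_points[1].blue = *end;
}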
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index e28e9770e0a3..c20fdcaac53b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h | |||
@@ -65,7 +65,8 @@ struct encoder_feature_support { | |||
65 | 65 | ||
66 | enum dc_color_depth max_hdmi_deep_color; | 66 | enum dc_color_depth max_hdmi_deep_color; |
67 | unsigned int max_hdmi_pixel_clock; | 67 | unsigned int max_hdmi_pixel_clock; |
68 | bool ycbcr420_supported; | 68 | bool hdmi_ycbcr420_supported; |
69 | bool dp_ycbcr420_supported; | ||
69 | }; | 70 | }; |
70 | 71 | ||
71 | union dpcd_psr_configuration { | 72 | union dpcd_psr_configuration { |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index da89c2edb07c..06df02ddff6a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | |||
@@ -31,7 +31,7 @@ | |||
31 | #include "dml/display_mode_structs.h" | 31 | #include "dml/display_mode_structs.h" |
32 | 32 | ||
33 | struct dchub_init_data; | 33 | struct dchub_init_data; |
34 | struct cstate_pstate_watermarks_st { | 34 | struct cstate_pstate_watermarks_st1 { |
35 | uint32_t cstate_exit_ns; | 35 | uint32_t cstate_exit_ns; |
36 | uint32_t cstate_enter_plus_exit_ns; | 36 | uint32_t cstate_enter_plus_exit_ns; |
37 | uint32_t pstate_change_ns; | 37 | uint32_t pstate_change_ns; |
@@ -40,7 +40,7 @@ struct cstate_pstate_watermarks_st { | |||
40 | struct dcn_watermarks { | 40 | struct dcn_watermarks { |
41 | uint32_t pte_meta_urgent_ns; | 41 | uint32_t pte_meta_urgent_ns; |
42 | uint32_t urgent_ns; | 42 | uint32_t urgent_ns; |
43 | struct cstate_pstate_watermarks_st cstate_pstate; | 43 | struct cstate_pstate_watermarks_st1 cstate_pstate; |
44 | }; | 44 | }; |
45 | 45 | ||
46 | struct dcn_watermark_set { | 46 | struct dcn_watermark_set { |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 26f29d5da3d8..e9b702ce02dd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | |||
@@ -32,8 +32,6 @@ | |||
32 | #include "inc/hw/link_encoder.h" | 32 | #include "inc/hw/link_encoder.h" |
33 | #include "core_status.h" | 33 | #include "core_status.h" |
34 | 34 | ||
35 | #define EDP_BACKLIGHT_RAMP_DISABLE_LEVEL 0xFFFFFFFF | ||
36 | |||
37 | enum pipe_gating_control { | 35 | enum pipe_gating_control { |
38 | PIPE_GATING_CONTROL_DISABLE = 0, | 36 | PIPE_GATING_CONTROL_DISABLE = 0, |
39 | PIPE_GATING_CONTROL_ENABLE, | 37 | PIPE_GATING_CONTROL_ENABLE, |
@@ -87,11 +85,6 @@ struct hw_sequencer_funcs { | |||
87 | void (*program_gamut_remap)( | 85 | void (*program_gamut_remap)( |
88 | struct pipe_ctx *pipe_ctx); | 86 | struct pipe_ctx *pipe_ctx); |
89 | 87 | ||
90 | void (*program_csc_matrix)( | ||
91 | struct pipe_ctx *pipe_ctx, | ||
92 | enum dc_color_space colorspace, | ||
93 | uint16_t *matrix); | ||
94 | |||
95 | void (*program_output_csc)(struct dc *dc, | 88 | void (*program_output_csc)(struct dc *dc, |
96 | struct pipe_ctx *pipe_ctx, | 89 | struct pipe_ctx *pipe_ctx, |
97 | enum dc_color_space colorspace, | 90 | enum dc_color_space colorspace, |
@@ -177,10 +170,12 @@ struct hw_sequencer_funcs { | |||
177 | struct pipe_ctx *pipe_ctx, | 170 | struct pipe_ctx *pipe_ctx, |
178 | bool blank); | 171 | bool blank); |
179 | 172 | ||
180 | void (*set_bandwidth)( | 173 | void (*prepare_bandwidth)( |
181 | struct dc *dc, | 174 | struct dc *dc, |
182 | struct dc_state *context, | 175 | struct dc_state *context); |
183 | bool safe_to_lower); | 176 | void (*optimize_bandwidth)( |
177 | struct dc *dc, | ||
178 | struct dc_state *context); | ||
184 | 179 | ||
185 | void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes, | 180 | void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes, |
186 | int vmin, int vmax); | 181 | int vmin, int vmax); |
@@ -210,11 +205,6 @@ struct hw_sequencer_funcs { | |||
210 | struct resource_pool *res_pool, | 205 | struct resource_pool *res_pool, |
211 | struct pipe_ctx *pipe_ctx); | 206 | struct pipe_ctx *pipe_ctx); |
212 | 207 | ||
213 | void (*ready_shared_resources)(struct dc *dc, struct dc_state *context); | ||
214 | void (*optimize_shared_resources)(struct dc *dc); | ||
215 | void (*pplib_apply_display_requirements)( | ||
216 | struct dc *dc, | ||
217 | struct dc_state *context); | ||
218 | void (*edp_power_control)( | 208 | void (*edp_power_control)( |
219 | struct dc_link *link, | 209 | struct dc_link *link, |
220 | bool enable); | 210 | bool enable); |
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 33b99e3ab10d..0086a2f1d21a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h | |||
@@ -30,9 +30,6 @@ | |||
30 | #include "dal_asic_id.h" | 30 | #include "dal_asic_id.h" |
31 | #include "dm_pp_smu.h" | 31 | #include "dm_pp_smu.h" |
32 | 32 | ||
33 | /* TODO unhardcode, 4 for CZ*/ | ||
34 | #define MEMORY_TYPE_MULTIPLIER 4 | ||
35 | |||
36 | enum dce_version resource_parse_asic_id( | 33 | enum dce_version resource_parse_asic_id( |
37 | struct hw_asic_id asic_id); | 34 | struct hw_asic_id asic_id); |
38 | 35 | ||
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index cdcefd087487..7480f072c375 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c | |||
@@ -306,6 +306,18 @@ static struct fixed31_32 translate_from_linear_space( | |||
306 | a1); | 306 | a1); |
307 | } | 307 | } |
308 | 308 | ||
309 | static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg) | ||
310 | { | ||
311 | struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10); | ||
312 | |||
313 | return translate_from_linear_space(arg, | ||
314 | dc_fixpt_zero, | ||
315 | dc_fixpt_zero, | ||
316 | dc_fixpt_zero, | ||
317 | dc_fixpt_zero, | ||
318 | gamma); | ||
319 | } | ||
320 | |||
309 | static struct fixed31_32 translate_to_linear_space( | 321 | static struct fixed31_32 translate_to_linear_space( |
310 | struct fixed31_32 arg, | 322 | struct fixed31_32 arg, |
311 | struct fixed31_32 a0, | 323 | struct fixed31_32 a0, |
@@ -709,6 +721,169 @@ static void build_regamma(struct pwl_float_data_ex *rgb_regamma, | |||
709 | } | 721 | } |
710 | } | 722 | } |
711 | 723 | ||
724 | static void hermite_spline_eetf(struct fixed31_32 input_x, | ||
725 | struct fixed31_32 max_display, | ||
726 | struct fixed31_32 min_display, | ||
727 | struct fixed31_32 max_content, | ||
728 | struct fixed31_32 *out_x) | ||
729 | { | ||
730 | struct fixed31_32 min_lum_pq; | ||
731 | struct fixed31_32 max_lum_pq; | ||
732 | struct fixed31_32 max_content_pq; | ||
733 | struct fixed31_32 ks; | ||
734 | struct fixed31_32 E1; | ||
735 | struct fixed31_32 E2; | ||
736 | struct fixed31_32 E3; | ||
737 | struct fixed31_32 t; | ||
738 | struct fixed31_32 t2; | ||
739 | struct fixed31_32 t3; | ||
740 | struct fixed31_32 two; | ||
741 | struct fixed31_32 three; | ||
742 | struct fixed31_32 temp1; | ||
743 | struct fixed31_32 temp2; | ||
744 | struct fixed31_32 a = dc_fixpt_from_fraction(15, 10); | ||
745 | struct fixed31_32 b = dc_fixpt_from_fraction(5, 10); | ||
746 | struct fixed31_32 epsilon = dc_fixpt_from_fraction(1, 1000000); // dc_fixpt_epsilon is a bit too small | ||
747 | |||
748 | if (dc_fixpt_eq(max_content, dc_fixpt_zero)) { | ||
749 | *out_x = dc_fixpt_zero; | ||
750 | return; | ||
751 | } | ||
752 | |||
753 | compute_pq(input_x, &E1); | ||
754 | compute_pq(dc_fixpt_div(min_display, max_content), &min_lum_pq); | ||
755 | compute_pq(dc_fixpt_div(max_display, max_content), &max_lum_pq); | ||
756 | compute_pq(dc_fixpt_one, &max_content_pq); // always 1? DAL2 code is weird | ||
757 | a = dc_fixpt_div(dc_fixpt_add(dc_fixpt_one, b), max_content_pq); // (1+b)/maxContent | ||
758 | ks = dc_fixpt_sub(dc_fixpt_mul(a, max_lum_pq), b); // a * max_lum_pq - b | ||
759 | |||
760 | if (dc_fixpt_lt(E1, ks)) | ||
761 | E2 = E1; | ||
762 | else if (dc_fixpt_le(ks, E1) && dc_fixpt_le(E1, dc_fixpt_one)) { | ||
763 | if (dc_fixpt_lt(epsilon, dc_fixpt_sub(dc_fixpt_one, ks))) | ||
764 | // t = (E1 - ks) / (1 - ks) | ||
765 | t = dc_fixpt_div(dc_fixpt_sub(E1, ks), | ||
766 | dc_fixpt_sub(dc_fixpt_one, ks)); | ||
767 | else | ||
768 | t = dc_fixpt_zero; | ||
769 | |||
770 | two = dc_fixpt_from_int(2); | ||
771 | three = dc_fixpt_from_int(3); | ||
772 | |||
773 | t2 = dc_fixpt_mul(t, t); | ||
774 | t3 = dc_fixpt_mul(t2, t); | ||
775 | temp1 = dc_fixpt_mul(two, t3); | ||
776 | temp2 = dc_fixpt_mul(three, t2); | ||
777 | |||
778 | // (2t^3 - 3t^2 + 1) * ks | ||
779 | E2 = dc_fixpt_mul(ks, dc_fixpt_add(dc_fixpt_one, | ||
780 | dc_fixpt_sub(temp1, temp2))); | ||
781 | |||
782 | // (-2t^3 + 3t^2) * max_lum_pq | ||
783 | E2 = dc_fixpt_add(E2, dc_fixpt_mul(max_lum_pq, | ||
784 | dc_fixpt_sub(temp2, temp1))); | ||
785 | |||
786 | temp1 = dc_fixpt_mul(two, t2); | ||
787 | temp2 = dc_fixpt_sub(dc_fixpt_one, ks); | ||
788 | |||
789 | // (t^3 - 2t^2 + t) * (1-ks) | ||
790 | E2 = dc_fixpt_add(E2, dc_fixpt_mul(temp2, | ||
791 | dc_fixpt_add(t, dc_fixpt_sub(t3, temp1)))); | ||
792 | } else | ||
793 | E2 = dc_fixpt_one; | ||
794 | |||
795 | temp1 = dc_fixpt_sub(dc_fixpt_one, E2); | ||
796 | temp2 = dc_fixpt_mul(temp1, temp1); | ||
797 | temp2 = dc_fixpt_mul(temp2, temp2); | ||
798 | // temp2 = (1-E2)^4 | ||
799 | |||
800 | E3 = dc_fixpt_add(E2, dc_fixpt_mul(min_lum_pq, temp2)); | ||
801 | compute_de_pq(E3, out_x); | ||
802 | |||
803 | *out_x = dc_fixpt_div(*out_x, dc_fixpt_div(max_display, max_content)); | ||
804 | } | ||
805 | |||
806 | static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, | ||
807 | uint32_t hw_points_num, | ||
808 | const struct hw_x_point *coordinate_x, | ||
809 | const struct freesync_hdr_tf_params *fs_params) | ||
810 | { | ||
811 | uint32_t i; | ||
812 | struct pwl_float_data_ex *rgb = rgb_regamma; | ||
813 | const struct hw_x_point *coord_x = coordinate_x; | ||
814 | struct fixed31_32 scaledX = dc_fixpt_zero; | ||
815 | struct fixed31_32 scaledX1 = dc_fixpt_zero; | ||
816 | struct fixed31_32 max_display = dc_fixpt_from_int(fs_params->max_display); | ||
817 | struct fixed31_32 min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000); | ||
818 | struct fixed31_32 max_content = dc_fixpt_from_int(fs_params->max_content); | ||
819 | struct fixed31_32 min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000); | ||
820 | struct fixed31_32 clip = dc_fixpt_one; | ||
821 | struct fixed31_32 output; | ||
822 | bool use_eetf = false; | ||
823 | bool is_clipped = false; | ||
824 | struct fixed31_32 sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level); | ||
825 | |||
826 | if (fs_params == NULL || fs_params->max_content == 0 || | ||
827 | fs_params->max_display == 0) | ||
828 | return false; | ||
829 | |||
830 | if (fs_params->min_display > 1000) // cap at 0.1 at the bottom | ||
831 | min_display = dc_fixpt_from_fraction(1, 10); | ||
832 | if (fs_params->max_display < 100) // cap at 100 at the top | ||
833 | max_display = dc_fixpt_from_int(100); | ||
834 | |||
835 | if (fs_params->min_content < fs_params->min_display) | ||
836 | use_eetf = true; | ||
837 | else | ||
838 | min_content = min_display; | ||
839 | |||
840 | if (fs_params->max_content > fs_params->max_display) | ||
841 | use_eetf = true; | ||
842 | else | ||
843 | max_content = max_display; | ||
844 | |||
845 | rgb += 32; // first 32 points have problems with fixed point, too small | ||
846 | coord_x += 32; | ||
847 | for (i = 32; i <= hw_points_num; i++) { | ||
848 | if (!is_clipped) { | ||
849 | if (use_eetf) { | ||
850 | /* max content is equal to 1 */ | ||
851 | scaledX1 = dc_fixpt_div(coord_x->x, | ||
852 | dc_fixpt_div(max_content, sdr_white_level)); | ||
853 | hermite_spline_eetf(scaledX1, max_display, min_display, | ||
854 | max_content, &scaledX); | ||
855 | } else | ||
856 | scaledX = dc_fixpt_div(coord_x->x, | ||
857 | dc_fixpt_div(max_display, sdr_white_level)); | ||
858 | |||
859 | if (dc_fixpt_lt(scaledX, clip)) { | ||
860 | if (dc_fixpt_lt(scaledX, dc_fixpt_zero)) | ||
861 | output = dc_fixpt_zero; | ||
862 | else | ||
863 | output = calculate_gamma22(scaledX); | ||
864 | |||
865 | rgb->r = output; | ||
866 | rgb->g = output; | ||
867 | rgb->b = output; | ||
868 | } else { | ||
869 | is_clipped = true; | ||
870 | rgb->r = clip; | ||
871 | rgb->g = clip; | ||
872 | rgb->b = clip; | ||
873 | } | ||
874 | } else { | ||
875 | rgb->r = clip; | ||
876 | rgb->g = clip; | ||
877 | rgb->b = clip; | ||
878 | } | ||
879 | |||
880 | ++coord_x; | ||
881 | ++rgb; | ||
882 | } | ||
883 | |||
884 | return true; | ||
885 | } | ||
886 | |||
712 | static void build_degamma(struct pwl_float_data_ex *curve, | 887 | static void build_degamma(struct pwl_float_data_ex *curve, |
713 | uint32_t hw_points_num, | 888 | uint32_t hw_points_num, |
714 | const struct hw_x_point *coordinate_x, bool is_2_4) | 889 | const struct hw_x_point *coordinate_x, bool is_2_4) |
@@ -1356,7 +1531,8 @@ static bool map_regamma_hw_to_x_user( | |||
1356 | #define _EXTRA_POINTS 3 | 1531 | #define _EXTRA_POINTS 3 |
1357 | 1532 | ||
1358 | bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, | 1533 | bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, |
1359 | const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed) | 1534 | const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed, |
1535 | const struct freesync_hdr_tf_params *fs_params) | ||
1360 | { | 1536 | { |
1361 | struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts; | 1537 | struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts; |
1362 | struct dividers dividers; | 1538 | struct dividers dividers; |
@@ -1374,7 +1550,7 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, | |||
1374 | /* we can use hardcoded curve for plain SRGB TF */ | 1550 | /* we can use hardcoded curve for plain SRGB TF */ |
1375 | if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true && | 1551 | if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true && |
1376 | output_tf->tf == TRANSFER_FUNCTION_SRGB && | 1552 | output_tf->tf == TRANSFER_FUNCTION_SRGB && |
1377 | (!mapUserRamp && ramp->type == GAMMA_RGB_256)) | 1553 | (ramp->is_identity || (!mapUserRamp && ramp->type == GAMMA_RGB_256))) |
1378 | return true; | 1554 | return true; |
1379 | 1555 | ||
1380 | output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; | 1556 | output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; |
@@ -1424,6 +1600,12 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, | |||
1424 | MAX_HW_POINTS, | 1600 | MAX_HW_POINTS, |
1425 | coordinates_x, | 1601 | coordinates_x, |
1426 | output_tf->sdr_ref_white_level); | 1602 | output_tf->sdr_ref_white_level); |
1603 | } else if (tf == TRANSFER_FUNCTION_GAMMA22 && | ||
1604 | fs_params != NULL) { | ||
1605 | build_freesync_hdr(rgb_regamma, | ||
1606 | MAX_HW_POINTS, | ||
1607 | coordinates_x, | ||
1608 | fs_params); | ||
1427 | } else { | 1609 | } else { |
1428 | tf_pts->end_exponent = 0; | 1610 | tf_pts->end_exponent = 0; |
1429 | tf_pts->x_point_at_y1_red = 1; | 1611 | tf_pts->x_point_at_y1_red = 1; |
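For reference, hermite_spline_eetf() above is a fixed-point rendering of the BT.2390-style EETF. With source and target luminances converted into PQ space, the code computes, term for term (b = 0.5, and a normalized by the PQ of maximum content):

k_s = a \cdot L^{PQ}_{\max} - b, \qquad a = \frac{1 + b}{\mathrm{PQ}(1)}

E_2 = \begin{cases}
E_1, & E_1 < k_s \\
(2t^3 - 3t^2 + 1)\,k_s + (t^3 - 2t^2 + t)\,(1 - k_s) + (-2t^3 + 3t^2)\,L^{PQ}_{\max}, & k_s \le E_1 \le 1,\; t = \dfrac{E_1 - k_s}{1 - k_s}
\end{cases}

E_3 = E_2 + L^{PQ}_{\min}\,(1 - E_2)^4

E_3 is then converted back out of PQ and rescaled by max_display/max_content, matching the final compute_de_pq() and dc_fixpt_div() steps in the function.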
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h index 63ccb9c91224..a6e164df090a 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h | |||
@@ -73,12 +73,21 @@ struct regamma_lut { | |||
73 | }; | 73 | }; |
74 | }; | 74 | }; |
75 | 75 | ||
76 | struct freesync_hdr_tf_params { | ||
77 | unsigned int sdr_white_level; | ||
78 | unsigned int min_content; // luminance in 1/10000 nits | ||
79 | unsigned int max_content; // luminance in nits | ||
80 | unsigned int min_display; // luminance in 1/10000 nits | ||
81 | unsigned int max_display; // luminance in nits | ||
82 | }; | ||
83 | |||
76 | void setup_x_points_distribution(void); | 84 | void setup_x_points_distribution(void); |
77 | void precompute_pq(void); | 85 | void precompute_pq(void); |
78 | void precompute_de_pq(void); | 86 | void precompute_de_pq(void); |
79 | 87 | ||
80 | bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, | 88 | bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, |
81 | const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed); | 89 | const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed, |
90 | const struct freesync_hdr_tf_params *fs_params); | ||
82 | 91 | ||
83 | bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf, | 92 | bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf, |
84 | const struct dc_gamma *ramp, bool mapUserRamp); | 93 | const struct dc_gamma *ramp, bool mapUserRamp); |
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 4018c7180d00..620a171620ee 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c | |||
@@ -37,6 +37,8 @@ | |||
37 | #define RENDER_TIMES_MAX_COUNT 10 | 37 | #define RENDER_TIMES_MAX_COUNT 10 |
38 | /* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */ | 38 | /* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */ |
39 | #define BTR_EXIT_MARGIN 2000 | 39 | #define BTR_EXIT_MARGIN 2000 |
40 | /* Threshold to exit fixed refresh rate */ | ||
41 | #define FIXED_REFRESH_EXIT_MARGIN_IN_HZ 4 | ||
40 | /* Number of consecutive frames to check before entering/exiting fixed refresh*/ | 42 | /* Number of consecutive frames to check before entering/exiting fixed refresh*/ |
41 | #define FIXED_REFRESH_ENTER_FRAME_COUNT 5 | 43 | #define FIXED_REFRESH_ENTER_FRAME_COUNT 5 |
42 | #define FIXED_REFRESH_EXIT_FRAME_COUNT 5 | 44 | #define FIXED_REFRESH_EXIT_FRAME_COUNT 5 |
@@ -257,40 +259,14 @@ static void apply_below_the_range(struct core_freesync *core_freesync, | |||
257 | if (in_out_vrr->btr.btr_active) { | 259 | if (in_out_vrr->btr.btr_active) { |
258 | in_out_vrr->btr.frame_counter = 0; | 260 | in_out_vrr->btr.frame_counter = 0; |
259 | in_out_vrr->btr.btr_active = false; | 261 | in_out_vrr->btr.btr_active = false; |
260 | |||
261 | /* Exit Fixed Refresh mode */ | ||
262 | } else if (in_out_vrr->fixed.fixed_active) { | ||
263 | |||
264 | in_out_vrr->fixed.frame_counter++; | ||
265 | |||
266 | if (in_out_vrr->fixed.frame_counter > | ||
267 | FIXED_REFRESH_EXIT_FRAME_COUNT) { | ||
268 | in_out_vrr->fixed.frame_counter = 0; | ||
269 | in_out_vrr->fixed.fixed_active = false; | ||
270 | } | ||
271 | } | 262 | } |
272 | } else if (last_render_time_in_us > max_render_time_in_us) { | 263 | } else if (last_render_time_in_us > max_render_time_in_us) { |
273 | /* Enter Below the Range */ | 264 | /* Enter Below the Range */ |
274 | if (!in_out_vrr->btr.btr_active && | 265 | in_out_vrr->btr.btr_active = true; |
275 | in_out_vrr->btr.btr_enabled) { | ||
276 | in_out_vrr->btr.btr_active = true; | ||
277 | |||
278 | /* Enter Fixed Refresh mode */ | ||
279 | } else if (!in_out_vrr->fixed.fixed_active && | ||
280 | !in_out_vrr->btr.btr_enabled) { | ||
281 | in_out_vrr->fixed.frame_counter++; | ||
282 | |||
283 | if (in_out_vrr->fixed.frame_counter > | ||
284 | FIXED_REFRESH_ENTER_FRAME_COUNT) { | ||
285 | in_out_vrr->fixed.frame_counter = 0; | ||
286 | in_out_vrr->fixed.fixed_active = true; | ||
287 | } | ||
288 | } | ||
289 | } | 266 | } |
290 | 267 | ||
291 | /* BTR set to "not active" so disengage */ | 268 | /* BTR set to "not active" so disengage */ |
292 | if (!in_out_vrr->btr.btr_active) { | 269 | if (!in_out_vrr->btr.btr_active) { |
293 | in_out_vrr->btr.btr_active = false; | ||
294 | in_out_vrr->btr.inserted_duration_in_us = 0; | 270 | in_out_vrr->btr.inserted_duration_in_us = 0; |
295 | in_out_vrr->btr.frames_to_insert = 0; | 271 | in_out_vrr->btr.frames_to_insert = 0; |
296 | in_out_vrr->btr.frame_counter = 0; | 272 | in_out_vrr->btr.frame_counter = 0; |
@@ -375,7 +351,12 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync, | |||
375 | bool update = false; | 351 | bool update = false; |
376 | unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us; | 352 | unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us; |
377 | 353 | ||
378 | if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) { | 354 | // Compute the exit refresh rate and exit frame duration |
355 | unsigned int exit_refresh_rate_in_milli_hz = ((1000000000/max_render_time_in_us) | ||
356 | + (1000*FIXED_REFRESH_EXIT_MARGIN_IN_HZ)); | ||
357 | unsigned int exit_frame_duration_in_us = 1000000000/exit_refresh_rate_in_milli_hz; | ||
358 | |||
359 | if (last_render_time_in_us < exit_frame_duration_in_us) { | ||
379 | /* Exit Fixed Refresh mode */ | 360 | /* Exit Fixed Refresh mode */ |
380 | if (in_out_vrr->fixed.fixed_active) { | 361 | if (in_out_vrr->fixed.fixed_active) { |
381 | in_out_vrr->fixed.frame_counter++; | 362 | in_out_vrr->fixed.frame_counter++; |
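As a worked example of the new exit test: with max_render_time_in_us = 25000 (a 40 Hz lower bound), exit_refresh_rate_in_milli_hz = 1000000000/25000 + 1000*4 = 44000, so exit_frame_duration_in_us = 1000000000/44000 ≈ 22727. Fixed refresh is therefore only abandoned once frames consistently render faster than roughly 44 Hz, the 4 Hz hysteresis that FIXED_REFRESH_EXIT_MARGIN_IN_HZ encodes, replacing the old flat 2000 us BTR_EXIT_MARGIN comparison.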
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 2083c308007c..470d7b89071a 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h | |||
@@ -133,6 +133,10 @@ enum PP_FEATURE_MASK { | |||
133 | PP_AVFS_MASK = 0x40000, | 133 | PP_AVFS_MASK = 0x40000, |
134 | }; | 134 | }; |
135 | 135 | ||
136 | enum DC_FEATURE_MASK { | ||
137 | DC_FBC_MASK = 0x1, | ||
138 | }; | ||
139 | |||
136 | /** | 140 | /** |
137 | * struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks | 141 | * struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks |
138 | */ | 142 | */ |
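DC_FEATURE_MASK follows the PP_FEATURE_MASK pattern directly above it: each bit gates one display-core feature behind the amdgpu_dc_feature_mask module parameter. A hedged sketch of the consumer-side check; the dcfeaturemask parameter name is an assumption about the amdgpu module options.

extern uint amdgpu_dc_feature_mask;

/* Illustrative: FBC stays disabled unless bit 0 (DC_FBC_MASK) is set
 * by the user, e.g. via amdgpu.dcfeaturemask=0x1 on the kernel line. */
static bool example_fbc_requested(void)
{
        return (amdgpu_dc_feature_mask & DC_FBC_MASK) != 0;
}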
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index d2e7c0fa96c2..8eb0bb241210 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h | |||
@@ -1325,7 +1325,7 @@ struct atom_smu_info_v3_3 { | |||
1325 | struct atom_common_table_header table_header; | 1325 | struct atom_common_table_header table_header; |
1326 | uint8_t smuip_min_ver; | 1326 | uint8_t smuip_min_ver; |
1327 | uint8_t smuip_max_ver; | 1327 | uint8_t smuip_max_ver; |
1328 | uint8_t smu_rsd1; | 1328 | uint8_t waflclk_ss_mode; |
1329 | uint8_t gpuclk_ss_mode; | 1329 | uint8_t gpuclk_ss_mode; |
1330 | uint16_t sclk_ss_percentage; | 1330 | uint16_t sclk_ss_percentage; |
1331 | uint16_t sclk_ss_rate_10hz; | 1331 | uint16_t sclk_ss_rate_10hz; |
@@ -1355,7 +1355,10 @@ struct atom_smu_info_v3_3 { | |||
1355 | uint32_t syspll3_1_vco_freq_10khz; | 1355 | uint32_t syspll3_1_vco_freq_10khz; |
1356 | uint32_t bootup_fclk_10khz; | 1356 | uint32_t bootup_fclk_10khz; |
1357 | uint32_t bootup_waflclk_10khz; | 1357 | uint32_t bootup_waflclk_10khz; |
1358 | uint32_t reserved[3]; | 1358 | uint32_t smu_info_caps; |
1359 | uint16_t waflclk_ss_percentage; // in unit of 0.001% | ||
1360 | uint16_t smuinitoffset; | ||
1361 | uint32_t reserved; | ||
1359 | }; | 1362 | }; |
1360 | 1363 | ||
1361 | /* | 1364 | /* |
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 64ecffd52126..58ac0b90c310 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h | |||
@@ -205,20 +205,6 @@ struct tile_config { | |||
205 | /** | 205 | /** |
206 | * struct kfd2kgd_calls | 206 | * struct kfd2kgd_calls |
207 | * | 207 | * |
208 | * @init_gtt_mem_allocation: Allocate a buffer on the gart aperture. | ||
209 | * The buffer can be used for mqds, hpds, kernel queue, fence and runlists | ||
210 | * | ||
211 | * @free_gtt_mem: Frees a buffer that was allocated on the gart aperture | ||
212 | * | ||
213 | * @get_local_mem_info: Retrieves information about GPU local memory | ||
214 | * | ||
215 | * @get_gpu_clock_counter: Retrieves GPU clock counter | ||
216 | * | ||
217 | * @get_max_engine_clock_in_mhz: Retrieves maximum GPU clock in MHz | ||
218 | * | ||
219 | * @alloc_pasid: Allocate a PASID | ||
220 | * @free_pasid: Free a PASID | ||
221 | * | ||
222 | * @program_sh_mem_settings: A function that should initiate the memory | 208 | * @program_sh_mem_settings: A function that should initiate the memory |
223 | * properties such as main aperture memory type (cache / non cached) and | 209 | * properties such as main aperture memory type (cache / non cached) and |
224 | * secondary aperture base address, size and memory type. | 210 | * secondary aperture base address, size and memory type. |
@@ -255,64 +241,16 @@ struct tile_config { | |||
255 | * | 241 | * |
256 | * @get_tile_config: Returns GPU-specific tiling mode information | 242 | * @get_tile_config: Returns GPU-specific tiling mode information |
257 | * | 243 | * |
258 | * @get_cu_info: Retrieves activated cu info | ||
259 | * | ||
260 | * @get_vram_usage: Returns current VRAM usage | ||
261 | * | ||
262 | * @create_process_vm: Create a VM address space for a given process and GPU | ||
263 | * | ||
264 | * @destroy_process_vm: Destroy a VM | ||
265 | * | ||
266 | * @get_process_page_dir: Get physical address of a VM page directory | ||
267 | * | ||
268 | * @set_vm_context_page_table_base: Program page table base for a VMID | 244 | * @set_vm_context_page_table_base: Program page table base for a VMID |
269 | * | 245 | * |
270 | * @alloc_memory_of_gpu: Allocate GPUVM memory | ||
271 | * | ||
272 | * @free_memory_of_gpu: Free GPUVM memory | ||
273 | * | ||
274 | * @map_memory_to_gpu: Map GPUVM memory into a specific VM address | ||
275 | * space. Allocates and updates page tables and page directories as | ||
276 | * needed. This function may return before all page table updates have | ||
277 | * completed. This allows multiple map operations (on multiple GPUs) | ||
278 | * to happen concurrently. Use sync_memory to synchronize with all | ||
279 | * pending updates. | ||
280 | * | ||
281 | * @unmap_memory_to_gpu: Unmap GPUVM memory from a specific VM address space | ||
282 | * | ||
283 | * @sync_memory: Wait for pending page table updates to complete | ||
284 | * | ||
285 | * @map_gtt_bo_to_kernel: Map a GTT BO for kernel access | ||
286 | * Pins the BO, maps it to kernel address space. Such BOs are never evicted. | ||
287 | * The kernel virtual address remains valid until the BO is freed. | ||
288 | * | ||
289 | * @restore_process_bos: Restore all BOs that belong to the | ||
290 | * process. This is intended for restoring memory mappings after a TTM | ||
291 | * eviction. | ||
292 | * | ||
293 | * @invalidate_tlbs: Invalidate TLBs for a specific PASID | 246 | * @invalidate_tlbs: Invalidate TLBs for a specific PASID |
294 | * | 247 | * |
295 | * @invalidate_tlbs_vmid: Invalidate TLBs for a specific VMID | 248 | * @invalidate_tlbs_vmid: Invalidate TLBs for a specific VMID |
296 | * | 249 | * |
297 | * @submit_ib: Submits an IB to the engine specified by inserting the | ||
298 | * IB to the corresponding ring (ring type). The IB is executed with the | ||
299 | * specified VMID in a user mode context. | ||
300 | * | ||
301 | * @get_vm_fault_info: Return information about a recent VM fault on | ||
302 | * GFXv7 and v8. If multiple VM faults occurred since the last call of | ||
303 | * this function, it will return information about the first of those | ||
304 | * faults. On GFXv9 VM fault information is fully contained in the IH | ||
305 | * packet and this function is not needed. | ||
306 | * | ||
307 | * @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the | 250 | * @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the |
308 | * IH ring entry. This function allows the KFD ISR to get the VMID | 251 | * IH ring entry. This function allows the KFD ISR to get the VMID |
309 | * from the fault status register as early as possible. | 252 | * from the fault status register as early as possible. |
310 | * | 253 | * |
311 | * @gpu_recover: let kgd reset gpu after kfd detect CPC hang | ||
312 | * | ||
313 | * @set_compute_idle: Indicates that compute is idle on a device. This | ||
314 | * can be used to change power profiles depending on compute activity. | ||
315 | * | ||
316 | * @get_hive_id: Returns hive id of current device, 0 if xgmi is not enabled | 254 | * @get_hive_id: Returns hive id of current device, 0 if xgmi is not enabled |
317 | * | 255 | * |
318 | * This structure contains function pointers to services that the kgd driver | 256 | * This structure contains function pointers to services that the kgd driver |
@@ -320,21 +258,6 @@ struct tile_config { | |||
320 | * | 258 | * |
321 | */ | 259 | */ |
322 | struct kfd2kgd_calls { | 260 | struct kfd2kgd_calls { |
323 | int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size, | ||
324 | void **mem_obj, uint64_t *gpu_addr, | ||
325 | void **cpu_ptr, bool mqd_gfx9); | ||
326 | |||
327 | void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj); | ||
328 | |||
329 | void (*get_local_mem_info)(struct kgd_dev *kgd, | ||
330 | struct kfd_local_mem_info *mem_info); | ||
331 | uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd); | ||
332 | |||
333 | uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd); | ||
334 | |||
335 | int (*alloc_pasid)(unsigned int bits); | ||
336 | void (*free_pasid)(unsigned int pasid); | ||
337 | |||
338 | /* Register access functions */ | 261 | /* Register access functions */ |
339 | void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid, | 262 | void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid, |
340 | uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, | 263 | uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, |
@@ -398,49 +321,11 @@ struct kfd2kgd_calls { | |||
398 | uint64_t va, uint32_t vmid); | 321 | uint64_t va, uint32_t vmid); |
399 | int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config); | 322 | int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config); |
400 | 323 | ||
401 | void (*get_cu_info)(struct kgd_dev *kgd, | ||
402 | struct kfd_cu_info *cu_info); | ||
403 | uint64_t (*get_vram_usage)(struct kgd_dev *kgd); | ||
404 | |||
405 | int (*create_process_vm)(struct kgd_dev *kgd, unsigned int pasid, void **vm, | ||
406 | void **process_info, struct dma_fence **ef); | ||
407 | int (*acquire_process_vm)(struct kgd_dev *kgd, struct file *filp, | ||
408 | unsigned int pasid, void **vm, void **process_info, | ||
409 | struct dma_fence **ef); | ||
410 | void (*destroy_process_vm)(struct kgd_dev *kgd, void *vm); | ||
411 | void (*release_process_vm)(struct kgd_dev *kgd, void *vm); | ||
412 | uint64_t (*get_process_page_dir)(void *vm); | ||
413 | void (*set_vm_context_page_table_base)(struct kgd_dev *kgd, | 324 | void (*set_vm_context_page_table_base)(struct kgd_dev *kgd, |
414 | uint32_t vmid, uint64_t page_table_base); | 325 | uint32_t vmid, uint64_t page_table_base); |
415 | int (*alloc_memory_of_gpu)(struct kgd_dev *kgd, uint64_t va, | ||
416 | uint64_t size, void *vm, | ||
417 | struct kgd_mem **mem, uint64_t *offset, | ||
418 | uint32_t flags); | ||
419 | int (*free_memory_of_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem); | ||
420 | int (*map_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem, | ||
421 | void *vm); | ||
422 | int (*unmap_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem, | ||
423 | void *vm); | ||
424 | int (*sync_memory)(struct kgd_dev *kgd, struct kgd_mem *mem, bool intr); | ||
425 | int (*map_gtt_bo_to_kernel)(struct kgd_dev *kgd, struct kgd_mem *mem, | ||
426 | void **kptr, uint64_t *size); | ||
427 | int (*restore_process_bos)(void *process_info, struct dma_fence **ef); | ||
428 | |||
429 | int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid); | 326 | int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid); |
430 | int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid); | 327 | int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid); |
431 | |||
432 | int (*submit_ib)(struct kgd_dev *kgd, enum kgd_engine_type engine, | ||
433 | uint32_t vmid, uint64_t gpu_addr, | ||
434 | uint32_t *ib_cmd, uint32_t ib_len); | ||
435 | |||
436 | int (*get_vm_fault_info)(struct kgd_dev *kgd, | ||
437 | struct kfd_vm_fault_info *info); | ||
438 | uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd); | 328 | uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd); |
439 | |||
440 | void (*gpu_recover)(struct kgd_dev *kgd); | ||
441 | |||
442 | void (*set_compute_idle)(struct kgd_dev *kgd, bool idle); | ||
443 | |||
444 | uint64_t (*get_hive_id)(struct kgd_dev *kgd); | 329 | uint64_t (*get_hive_id)(struct kgd_dev *kgd); |
445 | 330 | ||
446 | }; | 331 | }; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 5e19f5977eb1..d138ddae563d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | |||
@@ -967,7 +967,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) | |||
967 | PP_CAP(PHM_PlatformCaps_TDRamping) || | 967 | PP_CAP(PHM_PlatformCaps_TDRamping) || |
968 | PP_CAP(PHM_PlatformCaps_TCPRamping)) { | 968 | PP_CAP(PHM_PlatformCaps_TCPRamping)) { |
969 | 969 | ||
970 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 970 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
971 | mutex_lock(&adev->grbm_idx_mutex); | 971 | mutex_lock(&adev->grbm_idx_mutex); |
972 | value = 0; | 972 | value = 0; |
973 | value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX); | 973 | value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX); |
@@ -1014,13 +1014,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) | |||
1014 | "Failed to enable DPM DIDT.", goto error); | 1014 | "Failed to enable DPM DIDT.", goto error); |
1015 | } | 1015 | } |
1016 | mutex_unlock(&adev->grbm_idx_mutex); | 1016 | mutex_unlock(&adev->grbm_idx_mutex); |
1017 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 1017 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
1018 | } | 1018 | } |
1019 | 1019 | ||
1020 | return 0; | 1020 | return 0; |
1021 | error: | 1021 | error: |
1022 | mutex_unlock(&adev->grbm_idx_mutex); | 1022 | mutex_unlock(&adev->grbm_idx_mutex); |
1023 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 1023 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
1024 | return result; | 1024 | return result; |
1025 | } | 1025 | } |
1026 | 1026 | ||
@@ -1034,7 +1034,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr) | |||
1034 | PP_CAP(PHM_PlatformCaps_TDRamping) || | 1034 | PP_CAP(PHM_PlatformCaps_TDRamping) || |
1035 | PP_CAP(PHM_PlatformCaps_TCPRamping)) { | 1035 | PP_CAP(PHM_PlatformCaps_TCPRamping)) { |
1036 | 1036 | ||
1037 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 1037 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
1038 | 1038 | ||
1039 | result = smu7_enable_didt(hwmgr, false); | 1039 | result = smu7_enable_didt(hwmgr, false); |
1040 | PP_ASSERT_WITH_CODE((result == 0), | 1040 | PP_ASSERT_WITH_CODE((result == 0), |
@@ -1046,12 +1046,12 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr) | |||
1046 | PP_ASSERT_WITH_CODE((0 == result), | 1046 | PP_ASSERT_WITH_CODE((0 == result), |
1047 | "Failed to disable DPM DIDT.", goto error); | 1047 | "Failed to disable DPM DIDT.", goto error); |
1048 | } | 1048 | } |
1049 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 1049 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
1050 | } | 1050 | } |
1051 | 1051 | ||
1052 | return 0; | 1052 | return 0; |
1053 | error: | 1053 | error: |
1054 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 1054 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
1055 | return result; | 1055 | return result; |
1056 | } | 1056 | } |
1057 | 1057 | ||
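All of the powertune call sites above trade the direct adev->gfx.rlc.funcs dereference for the new amdgpu_gfx_rlc_enter/exit_safe_mode() wrappers while keeping the same bracketing discipline, sketched here in reduced form:

/* Illustrative skeleton of the pattern above: RLC safe mode and the
 * GRBM index mutex must be released on success and error paths alike. */
static int example_didt_reprogram(struct amdgpu_device *adev)
{
        int result = 0;

        amdgpu_gfx_rlc_enter_safe_mode(adev);
        mutex_lock(&adev->grbm_idx_mutex);

        /* ... per-shader-engine DIDT register programming ... */

        mutex_unlock(&adev->grbm_idx_mutex);
        amdgpu_gfx_rlc_exit_safe_mode(adev);
        return result;
}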
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c index 99a33c33a32c..101c09b212ad 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | |||
@@ -713,20 +713,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table, | |||
713 | for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) { | 713 | for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) { |
714 | table->WatermarkRow[1][i].MinClock = | 714 | table->WatermarkRow[1][i].MinClock = |
715 | cpu_to_le16((uint16_t) | 715 | cpu_to_le16((uint16_t) |
716 | (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) / | 716 | (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz / |
717 | 1000); | 717 | 1000)); |
718 | table->WatermarkRow[1][i].MaxClock = | 718 | table->WatermarkRow[1][i].MaxClock = |
719 | cpu_to_le16((uint16_t) | 719 | cpu_to_le16((uint16_t) |
720 | (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) / | 720 | (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz / |
721 | 1000); | 721 | 1000)); |
722 | table->WatermarkRow[1][i].MinUclk = | 722 | table->WatermarkRow[1][i].MinUclk = |
723 | cpu_to_le16((uint16_t) | 723 | cpu_to_le16((uint16_t) |
724 | (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) / | 724 | (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz / |
725 | 1000); | 725 | 1000)); |
726 | table->WatermarkRow[1][i].MaxUclk = | 726 | table->WatermarkRow[1][i].MaxUclk = |
727 | cpu_to_le16((uint16_t) | 727 | cpu_to_le16((uint16_t) |
728 | (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) / | 728 | (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz / |
729 | 1000); | 729 | 1000)); |
730 | table->WatermarkRow[1][i].WmSetting = (uint8_t) | 730 | table->WatermarkRow[1][i].WmSetting = (uint8_t) |
731 | wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id; | 731 | wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id; |
732 | } | 732 | } |
@@ -734,20 +734,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table, | |||
734 | for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) { | 734 | for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) { |
735 | table->WatermarkRow[0][i].MinClock = | 735 | table->WatermarkRow[0][i].MinClock = |
736 | cpu_to_le16((uint16_t) | 736 | cpu_to_le16((uint16_t) |
737 | (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) / | 737 | (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz / |
738 | 1000); | 738 | 1000)); |
739 | table->WatermarkRow[0][i].MaxClock = | 739 | table->WatermarkRow[0][i].MaxClock = |
740 | cpu_to_le16((uint16_t) | 740 | cpu_to_le16((uint16_t) |
741 | (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) / | 741 | (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz / |
742 | 1000); | 742 | 1000)); |
743 | table->WatermarkRow[0][i].MinUclk = | 743 | table->WatermarkRow[0][i].MinUclk = |
744 | cpu_to_le16((uint16_t) | 744 | cpu_to_le16((uint16_t) |
745 | (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) / | 745 | (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz / |
746 | 1000); | 746 | 1000)); |
747 | table->WatermarkRow[0][i].MaxUclk = | 747 | table->WatermarkRow[0][i].MaxUclk = |
748 | cpu_to_le16((uint16_t) | 748 | cpu_to_le16((uint16_t) |
749 | (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) / | 749 | (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz / |
750 | 1000); | 750 | 1000)); |
751 | table->WatermarkRow[0][i].WmSetting = (uint8_t) | 751 | table->WatermarkRow[0][i].WmSetting = (uint8_t) |
752 | wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; | 752 | wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; |
753 | } | 753 | } |
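The smu_helper.c hunks above fix an operator-precedence bug: the old code cast the kHz value to uint16_t before dividing by 1000, so any clock above 65535 kHz (about 65 MHz, i.e. essentially every real clock) was truncated modulo 2^16 first. The fix divides first and casts the resulting MHz-scale value. A self-contained C demo of the difference; the 800000 kHz figure is just an illustrative value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t khz = 800000;                          /* 800 MHz */

            uint16_t broken = (uint16_t)(khz) / 1000;       /* 800000 % 65536 = 13568 -> 13 */
            uint16_t fixed  = (uint16_t)(khz / 1000);       /* 800000 / 1000 = 800 */

            printf("broken=%u fixed=%u\n", broken, fixed);  /* broken=13 fixed=800 */
            return 0;
    }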
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c index 2d88abf97e7b..6f26cb241ecc 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | |||
@@ -937,7 +937,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) | |||
937 | 937 | ||
938 | num_se = adev->gfx.config.max_shader_engines; | 938 | num_se = adev->gfx.config.max_shader_engines; |
939 | 939 | ||
940 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 940 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
941 | 941 | ||
942 | mutex_lock(&adev->grbm_idx_mutex); | 942 | mutex_lock(&adev->grbm_idx_mutex); |
943 | for (count = 0; count < num_se; count++) { | 943 | for (count = 0; count < num_se; count++) { |
@@ -962,7 +962,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) | |||
962 | 962 | ||
963 | vega10_didt_set_mask(hwmgr, true); | 963 | vega10_didt_set_mask(hwmgr, true); |
964 | 964 | ||
965 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 965 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
966 | 966 | ||
967 | return 0; | 967 | return 0; |
968 | } | 968 | } |
@@ -971,11 +971,11 @@ static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) | |||
971 | { | 971 | { |
972 | struct amdgpu_device *adev = hwmgr->adev; | 972 | struct amdgpu_device *adev = hwmgr->adev; |
973 | 973 | ||
974 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 974 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
975 | 975 | ||
976 | vega10_didt_set_mask(hwmgr, false); | 976 | vega10_didt_set_mask(hwmgr, false); |
977 | 977 | ||
978 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 978 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
979 | 979 | ||
980 | return 0; | 980 | return 0; |
981 | } | 981 | } |
@@ -988,7 +988,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) | |||
988 | 988 | ||
989 | num_se = adev->gfx.config.max_shader_engines; | 989 | num_se = adev->gfx.config.max_shader_engines; |
990 | 990 | ||
991 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 991 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
992 | 992 | ||
993 | mutex_lock(&adev->grbm_idx_mutex); | 993 | mutex_lock(&adev->grbm_idx_mutex); |
994 | for (count = 0; count < num_se; count++) { | 994 | for (count = 0; count < num_se; count++) { |
@@ -1007,7 +1007,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) | |||
1007 | 1007 | ||
1008 | vega10_didt_set_mask(hwmgr, true); | 1008 | vega10_didt_set_mask(hwmgr, true); |
1009 | 1009 | ||
1010 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 1010 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
1011 | 1011 | ||
1012 | vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10); | 1012 | vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10); |
1013 | if (PP_CAP(PHM_PlatformCaps_GCEDC)) | 1013 | if (PP_CAP(PHM_PlatformCaps_GCEDC)) |
@@ -1024,11 +1024,11 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) | |||
1024 | struct amdgpu_device *adev = hwmgr->adev; | 1024 | struct amdgpu_device *adev = hwmgr->adev; |
1025 | uint32_t data; | 1025 | uint32_t data; |
1026 | 1026 | ||
1027 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 1027 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
1028 | 1028 | ||
1029 | vega10_didt_set_mask(hwmgr, false); | 1029 | vega10_didt_set_mask(hwmgr, false); |
1030 | 1030 | ||
1031 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 1031 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
1032 | 1032 | ||
1033 | if (PP_CAP(PHM_PlatformCaps_GCEDC)) { | 1033 | if (PP_CAP(PHM_PlatformCaps_GCEDC)) { |
1034 | data = 0x00000000; | 1034 | data = 0x00000000; |
@@ -1049,7 +1049,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr) | |||
1049 | 1049 | ||
1050 | num_se = adev->gfx.config.max_shader_engines; | 1050 | num_se = adev->gfx.config.max_shader_engines; |
1051 | 1051 | ||
1052 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 1052 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
1053 | 1053 | ||
1054 | mutex_lock(&adev->grbm_idx_mutex); | 1054 | mutex_lock(&adev->grbm_idx_mutex); |
1055 | for (count = 0; count < num_se; count++) { | 1055 | for (count = 0; count < num_se; count++) { |
@@ -1070,7 +1070,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr) | |||
1070 | 1070 | ||
1071 | vega10_didt_set_mask(hwmgr, true); | 1071 | vega10_didt_set_mask(hwmgr, true); |
1072 | 1072 | ||
1073 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 1073 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
1074 | 1074 | ||
1075 | return 0; | 1075 | return 0; |
1076 | } | 1076 | } |
@@ -1079,11 +1079,11 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr) | |||
1079 | { | 1079 | { |
1080 | struct amdgpu_device *adev = hwmgr->adev; | 1080 | struct amdgpu_device *adev = hwmgr->adev; |
1081 | 1081 | ||
1082 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 1082 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
1083 | 1083 | ||
1084 | vega10_didt_set_mask(hwmgr, false); | 1084 | vega10_didt_set_mask(hwmgr, false); |
1085 | 1085 | ||
1086 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 1086 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
1087 | 1087 | ||
1088 | return 0; | 1088 | return 0; |
1089 | } | 1089 | } |
@@ -1097,7 +1097,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) | |||
1097 | 1097 | ||
1098 | num_se = adev->gfx.config.max_shader_engines; | 1098 | num_se = adev->gfx.config.max_shader_engines; |
1099 | 1099 | ||
1100 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 1100 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
1101 | 1101 | ||
1102 | vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10); | 1102 | vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10); |
1103 | 1103 | ||
@@ -1118,7 +1118,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) | |||
1118 | 1118 | ||
1119 | vega10_didt_set_mask(hwmgr, true); | 1119 | vega10_didt_set_mask(hwmgr, true); |
1120 | 1120 | ||
1121 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 1121 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
1122 | 1122 | ||
1123 | vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10); | 1123 | vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10); |
1124 | 1124 | ||
@@ -1138,11 +1138,11 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) | |||
1138 | struct amdgpu_device *adev = hwmgr->adev; | 1138 | struct amdgpu_device *adev = hwmgr->adev; |
1139 | uint32_t data; | 1139 | uint32_t data; |
1140 | 1140 | ||
1141 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 1141 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
1142 | 1142 | ||
1143 | vega10_didt_set_mask(hwmgr, false); | 1143 | vega10_didt_set_mask(hwmgr, false); |
1144 | 1144 | ||
1145 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 1145 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
1146 | 1146 | ||
1147 | if (PP_CAP(PHM_PlatformCaps_GCEDC)) { | 1147 | if (PP_CAP(PHM_PlatformCaps_GCEDC)) { |
1148 | data = 0x00000000; | 1148 | data = 0x00000000; |
@@ -1160,7 +1160,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr) | |||
1160 | struct amdgpu_device *adev = hwmgr->adev; | 1160 | struct amdgpu_device *adev = hwmgr->adev; |
1161 | int result; | 1161 | int result; |
1162 | 1162 | ||
1163 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 1163 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
1164 | 1164 | ||
1165 | mutex_lock(&adev->grbm_idx_mutex); | 1165 | mutex_lock(&adev->grbm_idx_mutex); |
1166 | WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000); | 1166 | WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000); |
@@ -1173,7 +1173,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr) | |||
1173 | 1173 | ||
1174 | vega10_didt_set_mask(hwmgr, false); | 1174 | vega10_didt_set_mask(hwmgr, false); |
1175 | 1175 | ||
1176 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 1176 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
1177 | 1177 | ||
1178 | return 0; | 1178 | return 0; |
1179 | } | 1179 | } |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 57143d51e3ee..f2daf00cc911 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | |||
@@ -120,6 +120,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) | |||
120 | data->registry_data.disable_auto_wattman = 1; | 120 | data->registry_data.disable_auto_wattman = 1; |
121 | data->registry_data.auto_wattman_debug = 0; | 121 | data->registry_data.auto_wattman_debug = 0; |
122 | data->registry_data.auto_wattman_sample_period = 100; | 122 | data->registry_data.auto_wattman_sample_period = 100; |
123 | data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD; | ||
123 | data->registry_data.auto_wattman_threshold = 50; | 124 | data->registry_data.auto_wattman_threshold = 50; |
124 | data->registry_data.gfxoff_controlled_by_driver = 1; | 125 | data->registry_data.gfxoff_controlled_by_driver = 1; |
125 | data->gfxoff_allowed = false; | 126 | data->gfxoff_allowed = false; |
@@ -829,6 +830,28 @@ static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr) | |||
829 | return 0; | 830 | return 0; |
830 | } | 831 | } |
831 | 832 | ||
833 | static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr) | ||
834 | { | ||
835 | struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); | ||
836 | |||
837 | if (data->smu_features[GNLD_DPM_UCLK].enabled) | ||
838 | return smum_send_msg_to_smc_with_parameter(hwmgr, | ||
839 | PPSMC_MSG_SetUclkFastSwitch, | ||
840 | 1); | ||
841 | |||
842 | return 0; | ||
843 | } | ||
844 | |||
845 | static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr) | ||
846 | { | ||
847 | struct vega20_hwmgr *data = | ||
848 | (struct vega20_hwmgr *)(hwmgr->backend); | ||
849 | |||
850 | return smum_send_msg_to_smc_with_parameter(hwmgr, | ||
851 | PPSMC_MSG_SetFclkGfxClkRatio, | ||
852 | data->registry_data.fclk_gfxclk_ratio); | ||
853 | } | ||
854 | |||
832 | static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr) | 855 | static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr) |
833 | { | 856 | { |
834 | struct vega20_hwmgr *data = | 857 | struct vega20_hwmgr *data = |
@@ -1532,6 +1555,16 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) | |||
1532 | "[EnableDPMTasks] Failed to enable all smu features!", | 1555 | "[EnableDPMTasks] Failed to enable all smu features!", |
1533 | return result); | 1556 | return result); |
1534 | 1557 | ||
1558 | result = vega20_notify_smc_display_change(hwmgr); | ||
1559 | PP_ASSERT_WITH_CODE(!result, | ||
1560 | "[EnableDPMTasks] Failed to notify smc display change!", | ||
1561 | return result); | ||
1562 | |||
1563 | result = vega20_send_clock_ratio(hwmgr); | ||
1564 | PP_ASSERT_WITH_CODE(!result, | ||
1565 | "[EnableDPMTasks] Failed to send clock ratio!", | ||
1566 | return result); | ||
1567 | |||
1535 | /* Initialize UVD/VCE powergating state */ | 1568 | /* Initialize UVD/VCE powergating state */ |
1536 | vega20_init_powergate_state(hwmgr); | 1569 | vega20_init_powergate_state(hwmgr); |
1537 | 1570 | ||
@@ -1972,19 +2005,6 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx, | |||
1972 | return ret; | 2005 | return ret; |
1973 | } | 2006 | } |
1974 | 2007 | ||
1975 | static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr, | ||
1976 | bool has_disp) | ||
1977 | { | ||
1978 | struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); | ||
1979 | |||
1980 | if (data->smu_features[GNLD_DPM_UCLK].enabled) | ||
1981 | return smum_send_msg_to_smc_with_parameter(hwmgr, | ||
1982 | PPSMC_MSG_SetUclkFastSwitch, | ||
1983 | has_disp ? 1 : 0); | ||
1984 | |||
1985 | return 0; | ||
1986 | } | ||
1987 | |||
1988 | int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr, | 2008 | int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr, |
1989 | struct pp_display_clock_request *clock_req) | 2009 | struct pp_display_clock_request *clock_req) |
1990 | { | 2010 | { |
@@ -2044,13 +2064,6 @@ static int vega20_notify_smc_display_config_after_ps_adjustment( | |||
2044 | struct pp_display_clock_request clock_req; | 2064 | struct pp_display_clock_request clock_req; |
2045 | int ret = 0; | 2065 | int ret = 0; |
2046 | 2066 | ||
2047 | if ((hwmgr->display_config->num_display > 1) && | ||
2048 | !hwmgr->display_config->multi_monitor_in_sync && | ||
2049 | !hwmgr->display_config->nb_pstate_switch_disable) | ||
2050 | vega20_notify_smc_display_change(hwmgr, false); | ||
2051 | else | ||
2052 | vega20_notify_smc_display_change(hwmgr, true); | ||
2053 | |||
2054 | min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk; | 2067 | min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk; |
2055 | min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk; | 2068 | min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk; |
2056 | min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; | 2069 | min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; |
@@ -2742,7 +2755,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, | |||
2742 | for (i = 0; i < clocks.num_levels; i++) | 2755 | for (i = 0; i < clocks.num_levels; i++) |
2743 | size += sprintf(buf + size, "%d: %uMhz %s\n", | 2756 | size += sprintf(buf + size, "%d: %uMhz %s\n", |
2744 | i, clocks.data[i].clocks_in_khz / 1000, | 2757 | i, clocks.data[i].clocks_in_khz / 1000, |
2745 | (clocks.data[i].clocks_in_khz == now) ? "*" : ""); | 2758 | (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); |
2746 | break; | 2759 | break; |
2747 | 2760 | ||
2748 | case PP_MCLK: | 2761 | case PP_MCLK: |
@@ -2759,7 +2772,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, | |||
2759 | for (i = 0; i < clocks.num_levels; i++) | 2772 | for (i = 0; i < clocks.num_levels; i++) |
2760 | size += sprintf(buf + size, "%d: %uMhz %s\n", | 2773 | size += sprintf(buf + size, "%d: %uMhz %s\n", |
2761 | i, clocks.data[i].clocks_in_khz / 1000, | 2774 | i, clocks.data[i].clocks_in_khz / 1000, |
2762 | (clocks.data[i].clocks_in_khz == now) ? "*" : ""); | 2775 | (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); |
2763 | break; | 2776 | break; |
2764 | 2777 | ||
2765 | case PP_PCIE: | 2778 | case PP_PCIE: |
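The two "now * 10" hunks above correct a unit mismatch in vega20_print_clock_levels(): the DPM table levels are in kHz while the current-clock readback in now is evidently in 10 kHz units, so the old kHz-versus-10kHz comparison never matched and the "*" marker for the active level was never printed. Illustrative numbers (that the readback is in 10 kHz units is inferred from the fix itself):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t now = 80000;           /* readback, 10 kHz units: 800 MHz */
            uint32_t level_khz = 800000;    /* DPM table entry in kHz: 800 MHz */

            printf("old compare: %d\n", level_khz == now);       /* 0: never marks */
            printf("new compare: %d\n", level_khz == now * 10);  /* 1: marks active level */
            return 0;
    }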
@@ -3441,109 +3454,64 @@ static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr, | |||
3441 | 3454 | ||
3442 | static const struct pp_hwmgr_func vega20_hwmgr_funcs = { | 3455 | static const struct pp_hwmgr_func vega20_hwmgr_funcs = { |
3443 | /* init/fini related */ | 3456 | /* init/fini related */ |
3444 | .backend_init = | 3457 | .backend_init = vega20_hwmgr_backend_init, |
3445 | vega20_hwmgr_backend_init, | 3458 | .backend_fini = vega20_hwmgr_backend_fini, |
3446 | .backend_fini = | 3459 | .asic_setup = vega20_setup_asic_task, |
3447 | vega20_hwmgr_backend_fini, | 3460 | .power_off_asic = vega20_power_off_asic, |
3448 | .asic_setup = | 3461 | .dynamic_state_management_enable = vega20_enable_dpm_tasks, |
3449 | vega20_setup_asic_task, | 3462 | .dynamic_state_management_disable = vega20_disable_dpm_tasks, |
3450 | .power_off_asic = | ||
3451 | vega20_power_off_asic, | ||
3452 | .dynamic_state_management_enable = | ||
3453 | vega20_enable_dpm_tasks, | ||
3454 | .dynamic_state_management_disable = | ||
3455 | vega20_disable_dpm_tasks, | ||
3456 | /* power state related */ | 3463 | /* power state related */ |
3457 | .apply_clocks_adjust_rules = | 3464 | .apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules, |
3458 | vega20_apply_clocks_adjust_rules, | 3465 | .pre_display_config_changed = vega20_pre_display_configuration_changed_task, |
3459 | .pre_display_config_changed = | 3466 | .display_config_changed = vega20_display_configuration_changed_task, |
3460 | vega20_pre_display_configuration_changed_task, | ||
3461 | .display_config_changed = | ||
3462 | vega20_display_configuration_changed_task, | ||
3463 | .check_smc_update_required_for_display_configuration = | 3467 | .check_smc_update_required_for_display_configuration = |
3464 | vega20_check_smc_update_required_for_display_configuration, | 3468 | vega20_check_smc_update_required_for_display_configuration, |
3465 | .notify_smc_display_config_after_ps_adjustment = | 3469 | .notify_smc_display_config_after_ps_adjustment = |
3466 | vega20_notify_smc_display_config_after_ps_adjustment, | 3470 | vega20_notify_smc_display_config_after_ps_adjustment, |
3467 | /* export to DAL */ | 3471 | /* export to DAL */ |
3468 | .get_sclk = | 3472 | .get_sclk = vega20_dpm_get_sclk, |
3469 | vega20_dpm_get_sclk, | 3473 | .get_mclk = vega20_dpm_get_mclk, |
3470 | .get_mclk = | 3474 | .get_dal_power_level = vega20_get_dal_power_level, |
3471 | vega20_dpm_get_mclk, | 3475 | .get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency, |
3472 | .get_dal_power_level = | 3476 | .get_clock_by_type_with_voltage = vega20_get_clock_by_type_with_voltage, |
3473 | vega20_get_dal_power_level, | 3477 | .set_watermarks_for_clocks_ranges = vega20_set_watermarks_for_clocks_ranges, |
3474 | .get_clock_by_type_with_latency = | 3478 | .display_clock_voltage_request = vega20_display_clock_voltage_request, |
3475 | vega20_get_clock_by_type_with_latency, | 3479 | .get_performance_level = vega20_get_performance_level, |
3476 | .get_clock_by_type_with_voltage = | ||
3477 | vega20_get_clock_by_type_with_voltage, | ||
3478 | .set_watermarks_for_clocks_ranges = | ||
3479 | vega20_set_watermarks_for_clocks_ranges, | ||
3480 | .display_clock_voltage_request = | ||
3481 | vega20_display_clock_voltage_request, | ||
3482 | .get_performance_level = | ||
3483 | vega20_get_performance_level, | ||
3484 | /* UMD pstate, profile related */ | 3480 | /* UMD pstate, profile related */ |
3485 | .force_dpm_level = | 3481 | .force_dpm_level = vega20_dpm_force_dpm_level, |
3486 | vega20_dpm_force_dpm_level, | 3482 | .get_power_profile_mode = vega20_get_power_profile_mode, |
3487 | .get_power_profile_mode = | 3483 | .set_power_profile_mode = vega20_set_power_profile_mode, |
3488 | vega20_get_power_profile_mode, | ||
3489 | .set_power_profile_mode = | ||
3490 | vega20_set_power_profile_mode, | ||
3491 | /* od related */ | 3484 | /* od related */ |
3492 | .set_power_limit = | 3485 | .set_power_limit = vega20_set_power_limit, |
3493 | vega20_set_power_limit, | 3486 | .get_sclk_od = vega20_get_sclk_od, |
3494 | .get_sclk_od = | 3487 | .set_sclk_od = vega20_set_sclk_od, |
3495 | vega20_get_sclk_od, | 3488 | .get_mclk_od = vega20_get_mclk_od, |
3496 | .set_sclk_od = | 3489 | .set_mclk_od = vega20_set_mclk_od, |
3497 | vega20_set_sclk_od, | 3490 | .odn_edit_dpm_table = vega20_odn_edit_dpm_table, |
3498 | .get_mclk_od = | ||
3499 | vega20_get_mclk_od, | ||
3500 | .set_mclk_od = | ||
3501 | vega20_set_mclk_od, | ||
3502 | .odn_edit_dpm_table = | ||
3503 | vega20_odn_edit_dpm_table, | ||
3504 | /* for sysfs to retrieve/set gfxclk/memclk */ | 3491 | /* for sysfs to retrieve/set gfxclk/memclk */ |
3505 | .force_clock_level = | 3492 | .force_clock_level = vega20_force_clock_level, |
3506 | vega20_force_clock_level, | 3493 | .print_clock_levels = vega20_print_clock_levels, |
3507 | .print_clock_levels = | 3494 | .read_sensor = vega20_read_sensor, |
3508 | vega20_print_clock_levels, | ||
3509 | .read_sensor = | ||
3510 | vega20_read_sensor, | ||
3511 | /* powergate related */ | 3495 | /* powergate related */ |
3512 | .powergate_uvd = | 3496 | .powergate_uvd = vega20_power_gate_uvd, |
3513 | vega20_power_gate_uvd, | 3497 | .powergate_vce = vega20_power_gate_vce, |
3514 | .powergate_vce = | ||
3515 | vega20_power_gate_vce, | ||
3516 | /* thermal related */ | 3498 | /* thermal related */ |
3517 | .start_thermal_controller = | 3499 | .start_thermal_controller = vega20_start_thermal_controller, |
3518 | vega20_start_thermal_controller, | 3500 | .stop_thermal_controller = vega20_thermal_stop_thermal_controller, |
3519 | .stop_thermal_controller = | 3501 | .get_thermal_temperature_range = vega20_get_thermal_temperature_range, |
3520 | vega20_thermal_stop_thermal_controller, | 3502 | .register_irq_handlers = smu9_register_irq_handlers, |
3521 | .get_thermal_temperature_range = | 3503 | .disable_smc_firmware_ctf = vega20_thermal_disable_alert, |
3522 | vega20_get_thermal_temperature_range, | ||
3523 | .register_irq_handlers = | ||
3524 | smu9_register_irq_handlers, | ||
3525 | .disable_smc_firmware_ctf = | ||
3526 | vega20_thermal_disable_alert, | ||
3527 | /* fan control related */ | 3504 | /* fan control related */ |
3528 | .get_fan_speed_percent = | 3505 | .get_fan_speed_percent = vega20_fan_ctrl_get_fan_speed_percent, |
3529 | vega20_fan_ctrl_get_fan_speed_percent, | 3506 | .set_fan_speed_percent = vega20_fan_ctrl_set_fan_speed_percent, |
3530 | .set_fan_speed_percent = | 3507 | .get_fan_speed_info = vega20_fan_ctrl_get_fan_speed_info, |
3531 | vega20_fan_ctrl_set_fan_speed_percent, | 3508 | .get_fan_speed_rpm = vega20_fan_ctrl_get_fan_speed_rpm, |
3532 | .get_fan_speed_info = | 3509 | .set_fan_speed_rpm = vega20_fan_ctrl_set_fan_speed_rpm, |
3533 | vega20_fan_ctrl_get_fan_speed_info, | 3510 | .get_fan_control_mode = vega20_get_fan_control_mode, |
3534 | .get_fan_speed_rpm = | 3511 | .set_fan_control_mode = vega20_set_fan_control_mode, |
3535 | vega20_fan_ctrl_get_fan_speed_rpm, | ||
3536 | .set_fan_speed_rpm = | ||
3537 | vega20_fan_ctrl_set_fan_speed_rpm, | ||
3538 | .get_fan_control_mode = | ||
3539 | vega20_get_fan_control_mode, | ||
3540 | .set_fan_control_mode = | ||
3541 | vega20_set_fan_control_mode, | ||
3542 | /* smu memory related */ | 3512 | /* smu memory related */ |
3543 | .notify_cac_buffer_info = | 3513 | .notify_cac_buffer_info = vega20_notify_cac_buffer_info, |
3544 | vega20_notify_cac_buffer_info, | 3514 | .enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost, |
3545 | .enable_mgpu_fan_boost = | ||
3546 | vega20_enable_mgpu_fan_boost, | ||
3547 | }; | 3515 | }; |
3548 | 3516 | ||
3549 | int vega20_hwmgr_init(struct pp_hwmgr *hwmgr) | 3517 | int vega20_hwmgr_init(struct pp_hwmgr *hwmgr) |
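Three distinct changes land in vega20_hwmgr.c. First, the UCLK fast-switch notification is now sent unconditionally while DPM is being enabled, and the old per-display-reconfiguration enable/disable logic (the has_disp variant removed further down) is dropped. Second, a new PPSMC_MSG_SetFclkGfxClkRatio message hands the SMU a default fclk_gfxclk_ratio of 0x3F6CCCCD; that value looks like an IEEE-754 single-precision bit pattern (roughly 0.925f) passed as a raw u32, though the diff itself only shows the integer. A small C round-trip of that reading:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            uint32_t bits = 0x3F6CCCCD;             /* default fclk_gfxclk_ratio */
            float ratio;

            memcpy(&ratio, &bits, sizeof(ratio));   /* type-pun safely via memcpy */
            printf("ratio = %f\n", ratio);          /* prints ~0.925 */
            return 0;
    }

Third, the large vega20_hwmgr_funcs hunk is a pure reflow of the initializer list onto single lines; no entries change.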
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h index 56fe6a0d42e8..25faaa5c5b10 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h | |||
@@ -328,6 +328,7 @@ struct vega20_registry_data { | |||
328 | uint8_t disable_auto_wattman; | 328 | uint8_t disable_auto_wattman; |
329 | uint32_t auto_wattman_debug; | 329 | uint32_t auto_wattman_debug; |
330 | uint32_t auto_wattman_sample_period; | 330 | uint32_t auto_wattman_sample_period; |
331 | uint32_t fclk_gfxclk_ratio; | ||
331 | uint8_t auto_wattman_threshold; | 332 | uint8_t auto_wattman_threshold; |
332 | uint8_t log_avfs_param; | 333 | uint8_t log_avfs_param; |
333 | uint8_t enable_enginess; | 334 | uint8_t enable_enginess; |
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index e5a60aa44b5d..07d180ce4d18 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | |||
@@ -28,7 +28,6 @@ | |||
28 | #include "hardwaremanager.h" | 28 | #include "hardwaremanager.h" |
29 | #include "hwmgr_ppt.h" | 29 | #include "hwmgr_ppt.h" |
30 | #include "ppatomctrl.h" | 30 | #include "ppatomctrl.h" |
31 | #include "hwmgr_ppt.h" | ||
32 | #include "power_state.h" | 31 | #include "power_state.h" |
33 | #include "smu_helper.h" | 32 | #include "smu_helper.h" |
34 | 33 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h index 65eb630bfea3..94bf7b649c20 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h | |||
@@ -40,10 +40,6 @@ | |||
40 | #include "bif/bif_5_0_d.h" | 40 | #include "bif/bif_5_0_d.h" |
41 | #include "bif/bif_5_0_sh_mask.h" | 41 | #include "bif/bif_5_0_sh_mask.h" |
42 | 42 | ||
43 | |||
44 | #include "bif/bif_5_0_d.h" | ||
45 | #include "bif/bif_5_0_sh_mask.h" | ||
46 | |||
47 | #include "dce/dce_10_0_d.h" | 43 | #include "dce/dce_10_0_d.h" |
48 | #include "dce/dce_10_0_sh_mask.h" | 44 | #include "dce/dce_10_0_sh_mask.h" |
49 | 45 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h index 45d64a81e945..4f63a736ea0e 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h | |||
@@ -105,7 +105,8 @@ | |||
105 | #define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x4B | 105 | #define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x4B |
106 | #define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x4C | 106 | #define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x4C |
107 | #define PPSMC_MSG_WaflTest 0x4D | 107 | #define PPSMC_MSG_WaflTest 0x4D |
108 | // Unused ID 0x4E to 0x50 | 108 | #define PPSMC_MSG_SetFclkGfxClkRatio 0x4E |
109 | // Unused ID 0x4F to 0x50 | ||
109 | #define PPSMC_MSG_AllowGfxOff 0x51 | 110 | #define PPSMC_MSG_AllowGfxOff 0x51 |
110 | #define PPSMC_MSG_DisallowGfxOff 0x52 | 111 | #define PPSMC_MSG_DisallowGfxOff 0x52 |
111 | #define PPSMC_MSG_GetPptLimit 0x53 | 112 | #define PPSMC_MSG_GetPptLimit 0x53 |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 872d3824337b..2b2c26616902 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | |||
@@ -44,7 +44,6 @@ | |||
44 | 44 | ||
45 | #include "smu7_hwmgr.h" | 45 | #include "smu7_hwmgr.h" |
46 | #include "hardwaremanager.h" | 46 | #include "hardwaremanager.h" |
47 | #include "ppatomctrl.h" | ||
48 | #include "atombios.h" | 47 | #include "atombios.h" |
49 | #include "pppcielanes.h" | 48 | #include "pppcielanes.h" |
50 | 49 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c index d0eb8ab50148..d111dd4e03d7 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include "rv_ppsmc.h" | 29 | #include "rv_ppsmc.h" |
30 | #include "smu10_driver_if.h" | 30 | #include "smu10_driver_if.h" |
31 | #include "smu10.h" | 31 | #include "smu10.h" |
32 | #include "ppatomctrl.h" | ||
33 | #include "pp_debug.h" | 32 | #include "pp_debug.h" |
34 | 33 | ||
35 | 34 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c index 09b844ec3eab..e2787e14a500 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/gfp.h> | 25 | #include <linux/gfp.h> |
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/ktime.h> | ||
27 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
28 | #include <linux/types.h> | 29 | #include <linux/types.h> |
29 | 30 | ||
@@ -61,9 +62,13 @@ static uint32_t smu8_get_argument(struct pp_hwmgr *hwmgr) | |||
61 | mmSMU_MP1_SRBM2P_ARG_0); | 62 | mmSMU_MP1_SRBM2P_ARG_0); |
62 | } | 63 | } |
63 | 64 | ||
64 | static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg) | 65 | /* Send a message to the SMC, and wait for its response.*/ |
66 | static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, | ||
67 | uint16_t msg, uint32_t parameter) | ||
65 | { | 68 | { |
66 | int result = 0; | 69 | int result = 0; |
70 | ktime_t t_start; | ||
71 | s64 elapsed_us; | ||
67 | 72 | ||
68 | if (hwmgr == NULL || hwmgr->device == NULL) | 73 | if (hwmgr == NULL || hwmgr->device == NULL) |
69 | return -EINVAL; | 74 | return -EINVAL; |
@@ -74,28 +79,31 @@ static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg) | |||
74 | /* Read the last message to SMU, to report actual cause */ | 79 | /* Read the last message to SMU, to report actual cause */ |
75 | uint32_t val = cgs_read_register(hwmgr->device, | 80 | uint32_t val = cgs_read_register(hwmgr->device, |
76 | mmSMU_MP1_SRBM2P_MSG_0); | 81 | mmSMU_MP1_SRBM2P_MSG_0); |
77 | pr_err("smu8_send_msg_to_smc_async (0x%04x) failed\n", msg); | 82 | pr_err("%s(0x%04x) aborted; SMU still servicing msg (0x%04x)\n", |
78 | pr_err("SMU still servicing msg (0x%04x)\n", val); | 83 | __func__, msg, val); |
79 | return result; | 84 | return result; |
80 | } | 85 | } |
86 | t_start = ktime_get(); | ||
87 | |||
88 | cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter); | ||
81 | 89 | ||
82 | cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0); | 90 | cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0); |
83 | cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg); | 91 | cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg); |
84 | 92 | ||
85 | return 0; | 93 | result = PHM_WAIT_FIELD_UNEQUAL(hwmgr, |
94 | SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); | ||
95 | |||
96 | elapsed_us = ktime_us_delta(ktime_get(), t_start); | ||
97 | |||
98 | WARN(result, "%s(0x%04x, %#x) timed out after %lld us\n", | ||
99 | __func__, msg, parameter, elapsed_us); | ||
100 | |||
101 | return result; | ||
86 | } | 102 | } |
87 | 103 | ||
88 | /* Send a message to the SMC, and wait for its response.*/ | ||
89 | static int smu8_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) | 104 | static int smu8_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) |
90 | { | 105 | { |
91 | int result = 0; | 106 | return smu8_send_msg_to_smc_with_parameter(hwmgr, msg, 0); |
92 | |||
93 | result = smu8_send_msg_to_smc_async(hwmgr, msg); | ||
94 | if (result != 0) | ||
95 | return result; | ||
96 | |||
97 | return PHM_WAIT_FIELD_UNEQUAL(hwmgr, | ||
98 | SMU_MP1_SRBM2P_RESP_0, CONTENT, 0); | ||
99 | } | 107 | } |
100 | 108 | ||
101 | static int smu8_set_smc_sram_address(struct pp_hwmgr *hwmgr, | 109 | static int smu8_set_smc_sram_address(struct pp_hwmgr *hwmgr, |
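The rewritten smu8 send path above folds the old fire-and-forget smu8_send_msg_to_smc_async() into a single parameterized send-and-wait, and instruments it with a ktime-based latency measurement so a timeout now WARNs with how long the SMU was given. A minimal sketch of that timing pattern in isolation; do_wait() stands in for the real register poll:

    #include <linux/kernel.h>
    #include <linux/ktime.h>

    /* Sketch of the pattern, not the driver function itself. */
    static int timed_wait(int (*do_wait)(void))
    {
            ktime_t t_start = ktime_get();
            int result = do_wait();         /* e.g. poll a response register */
            s64 elapsed_us = ktime_us_delta(ktime_get(), t_start);

            WARN(result, "wait failed after %lld us\n", elapsed_us);
            return result;
    }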
@@ -135,17 +143,6 @@ static int smu8_write_smc_sram_dword(struct pp_hwmgr *hwmgr, | |||
135 | return result; | 143 | return result; |
136 | } | 144 | } |
137 | 145 | ||
138 | static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, | ||
139 | uint16_t msg, uint32_t parameter) | ||
140 | { | ||
141 | if (hwmgr == NULL || hwmgr->device == NULL) | ||
142 | return -EINVAL; | ||
143 | |||
144 | cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter); | ||
145 | |||
146 | return smu8_send_msg_to_smc(hwmgr, msg); | ||
147 | } | ||
148 | |||
149 | static int smu8_check_fw_load_finish(struct pp_hwmgr *hwmgr, | 146 | static int smu8_check_fw_load_finish(struct pp_hwmgr *hwmgr, |
150 | uint32_t firmware) | 147 | uint32_t firmware) |
151 | { | 148 | { |
@@ -737,6 +734,10 @@ static int smu8_start_smu(struct pp_hwmgr *hwmgr) | |||
737 | 734 | ||
738 | cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index); | 735 | cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index); |
739 | hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA); | 736 | hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA); |
737 | pr_info("smu version %02d.%02d.%02d\n", | ||
738 | ((hwmgr->smu_version >> 16) & 0xFF), | ||
739 | ((hwmgr->smu_version >> 8) & 0xFF), | ||
740 | (hwmgr->smu_version & 0xFF)); | ||
740 | adev->pm.fw_version = hwmgr->smu_version >> 8; | 741 | adev->pm.fw_version = hwmgr->smu_version >> 8; |
741 | 742 | ||
742 | return smu8_request_smu_load_fw(hwmgr); | 743 | return smu8_request_smu_load_fw(hwmgr); |
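The new pr_info above decodes the packed firmware version register into three major.minor.patch bytes; note that adev->pm.fw_version keeps only the top two of those fields (the >> 8). A standalone demo of the decode, with an example packed value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t smu_version = 0x00010203;      /* example packed value */

            printf("smu version %02d.%02d.%02d\n",
                   (smu_version >> 16) & 0xFF,
                   (smu_version >> 8) & 0xFF,
                   smu_version & 0xFF);             /* -> smu version 01.02.03 */
            return 0;
    }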
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 99d5e4f98f49..a6edd5df33b0 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | |||
@@ -37,10 +37,13 @@ MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); | |||
37 | MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); | 37 | MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); |
38 | MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); | 38 | MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); |
39 | MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin"); | 39 | MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin"); |
40 | MODULE_FIRMWARE("amdgpu/polaris10_k2_smc.bin"); | ||
40 | MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); | 41 | MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); |
41 | MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin"); | 42 | MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin"); |
42 | MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin"); | 43 | MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin"); |
44 | MODULE_FIRMWARE("amdgpu/polaris11_k2_smc.bin"); | ||
43 | MODULE_FIRMWARE("amdgpu/polaris12_smc.bin"); | 45 | MODULE_FIRMWARE("amdgpu/polaris12_smc.bin"); |
46 | MODULE_FIRMWARE("amdgpu/polaris12_k_smc.bin"); | ||
44 | MODULE_FIRMWARE("amdgpu/vegam_smc.bin"); | 47 | MODULE_FIRMWARE("amdgpu/vegam_smc.bin"); |
45 | MODULE_FIRMWARE("amdgpu/vega10_smc.bin"); | 48 | MODULE_FIRMWARE("amdgpu/vega10_smc.bin"); |
46 | MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin"); | 49 | MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin"); |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c index 9f71512b2510..1e69300f6175 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | |||
@@ -40,7 +40,6 @@ | |||
40 | 40 | ||
41 | #include "smu7_hwmgr.h" | 41 | #include "smu7_hwmgr.h" |
42 | #include "hardwaremanager.h" | 42 | #include "hardwaremanager.h" |
43 | #include "ppatomctrl.h" | ||
44 | #include "atombios.h" | 43 | #include "atombios.h" |
45 | #include "pppcielanes.h" | 44 | #include "pppcielanes.h" |
46 | 45 | ||
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index e6c4cd3dc50e..bfc65040dfcb 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h | |||
@@ -104,8 +104,6 @@ struct ast_private { | |||
104 | int fb_mtrr; | 104 | int fb_mtrr; |
105 | 105 | ||
106 | struct { | 106 | struct { |
107 | struct drm_global_reference mem_global_ref; | ||
108 | struct ttm_bo_global_ref bo_global_ref; | ||
109 | struct ttm_bo_device bdev; | 107 | struct ttm_bo_device bdev; |
110 | } ttm; | 108 | } ttm; |
111 | 109 | ||
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index fe354ebf374d..c168d62fe8f9 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c | |||
@@ -36,63 +36,6 @@ ast_bdev(struct ttm_bo_device *bd) | |||
36 | return container_of(bd, struct ast_private, ttm.bdev); | 36 | return container_of(bd, struct ast_private, ttm.bdev); |
37 | } | 37 | } |
38 | 38 | ||
39 | static int | ||
40 | ast_ttm_mem_global_init(struct drm_global_reference *ref) | ||
41 | { | ||
42 | return ttm_mem_global_init(ref->object); | ||
43 | } | ||
44 | |||
45 | static void | ||
46 | ast_ttm_mem_global_release(struct drm_global_reference *ref) | ||
47 | { | ||
48 | ttm_mem_global_release(ref->object); | ||
49 | } | ||
50 | |||
51 | static int ast_ttm_global_init(struct ast_private *ast) | ||
52 | { | ||
53 | struct drm_global_reference *global_ref; | ||
54 | int r; | ||
55 | |||
56 | global_ref = &ast->ttm.mem_global_ref; | ||
57 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; | ||
58 | global_ref->size = sizeof(struct ttm_mem_global); | ||
59 | global_ref->init = &ast_ttm_mem_global_init; | ||
60 | global_ref->release = &ast_ttm_mem_global_release; | ||
61 | r = drm_global_item_ref(global_ref); | ||
62 | if (r != 0) { | ||
63 | DRM_ERROR("Failed setting up TTM memory accounting " | ||
64 | "subsystem.\n"); | ||
65 | return r; | ||
66 | } | ||
67 | |||
68 | ast->ttm.bo_global_ref.mem_glob = | ||
69 | ast->ttm.mem_global_ref.object; | ||
70 | global_ref = &ast->ttm.bo_global_ref.ref; | ||
71 | global_ref->global_type = DRM_GLOBAL_TTM_BO; | ||
72 | global_ref->size = sizeof(struct ttm_bo_global); | ||
73 | global_ref->init = &ttm_bo_global_init; | ||
74 | global_ref->release = &ttm_bo_global_release; | ||
75 | r = drm_global_item_ref(global_ref); | ||
76 | if (r != 0) { | ||
77 | DRM_ERROR("Failed setting up TTM BO subsystem.\n"); | ||
78 | drm_global_item_unref(&ast->ttm.mem_global_ref); | ||
79 | return r; | ||
80 | } | ||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | static void | ||
85 | ast_ttm_global_release(struct ast_private *ast) | ||
86 | { | ||
87 | if (ast->ttm.mem_global_ref.release == NULL) | ||
88 | return; | ||
89 | |||
90 | drm_global_item_unref(&ast->ttm.bo_global_ref.ref); | ||
91 | drm_global_item_unref(&ast->ttm.mem_global_ref); | ||
92 | ast->ttm.mem_global_ref.release = NULL; | ||
93 | } | ||
94 | |||
95 | |||
96 | static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo) | 39 | static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo) |
97 | { | 40 | { |
98 | struct ast_bo *bo; | 41 | struct ast_bo *bo; |
@@ -232,12 +175,7 @@ int ast_mm_init(struct ast_private *ast) | |||
232 | struct drm_device *dev = ast->dev; | 175 | struct drm_device *dev = ast->dev; |
233 | struct ttm_bo_device *bdev = &ast->ttm.bdev; | 176 | struct ttm_bo_device *bdev = &ast->ttm.bdev; |
234 | 177 | ||
235 | ret = ast_ttm_global_init(ast); | ||
236 | if (ret) | ||
237 | return ret; | ||
238 | |||
239 | ret = ttm_bo_device_init(&ast->ttm.bdev, | 178 | ret = ttm_bo_device_init(&ast->ttm.bdev, |
240 | ast->ttm.bo_global_ref.ref.object, | ||
241 | &ast_bo_driver, | 179 | &ast_bo_driver, |
242 | dev->anon_inode->i_mapping, | 180 | dev->anon_inode->i_mapping, |
243 | DRM_FILE_PAGE_OFFSET, | 181 | DRM_FILE_PAGE_OFFSET, |
@@ -268,8 +206,6 @@ void ast_mm_fini(struct ast_private *ast) | |||
268 | 206 | ||
269 | ttm_bo_device_release(&ast->ttm.bdev); | 207 | ttm_bo_device_release(&ast->ttm.bdev); |
270 | 208 | ||
271 | ast_ttm_global_release(ast); | ||
272 | |||
273 | arch_phys_wc_del(ast->fb_mtrr); | 209 | arch_phys_wc_del(ast->fb_mtrr); |
274 | arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0), | 210 | arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0), |
275 | pci_resource_len(dev->pdev, 0)); | 211 | pci_resource_len(dev->pdev, 0)); |
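ast here, and bochs and cirrus below, drop their per-driver copies of the TTM memory/BO "global reference" boilerplate. With drm_global gone (drm_global.c is deleted outright at the end of this series), ttm_bo_device_init() no longer takes the global BO object and each driver's init path shrinks to a single call. A sketch of the simplified shape, following the call visible in the hunk; the trailing need_dma32 flag is an assumption about this kernel era's signature, and error handling is elided:

    /* Sketch: the post-removal TTM init path. */
    ret = ttm_bo_device_init(&ast->ttm.bdev,
                             &ast_bo_driver,               /* struct ttm_bo_driver */
                             dev->anon_inode->i_mapping,   /* mmap address space */
                             DRM_FILE_PAGE_OFFSET,
                             true);                        /* need_dma32 */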
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h index 577a8b917cb9..fb38c8b857b5 100644 --- a/drivers/gpu/drm/bochs/bochs.h +++ b/drivers/gpu/drm/bochs/bochs.h | |||
@@ -77,8 +77,6 @@ struct bochs_device { | |||
77 | 77 | ||
78 | /* ttm */ | 78 | /* ttm */ |
79 | struct { | 79 | struct { |
80 | struct drm_global_reference mem_global_ref; | ||
81 | struct ttm_bo_global_ref bo_global_ref; | ||
82 | struct ttm_bo_device bdev; | 80 | struct ttm_bo_device bdev; |
83 | bool initialized; | 81 | bool initialized; |
84 | } ttm; | 82 | } ttm; |
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c index e6ccf7fa92d4..0980411e41bf 100644 --- a/drivers/gpu/drm/bochs/bochs_mm.c +++ b/drivers/gpu/drm/bochs/bochs_mm.c | |||
@@ -16,61 +16,6 @@ static inline struct bochs_device *bochs_bdev(struct ttm_bo_device *bd) | |||
16 | return container_of(bd, struct bochs_device, ttm.bdev); | 16 | return container_of(bd, struct bochs_device, ttm.bdev); |
17 | } | 17 | } |
18 | 18 | ||
19 | static int bochs_ttm_mem_global_init(struct drm_global_reference *ref) | ||
20 | { | ||
21 | return ttm_mem_global_init(ref->object); | ||
22 | } | ||
23 | |||
24 | static void bochs_ttm_mem_global_release(struct drm_global_reference *ref) | ||
25 | { | ||
26 | ttm_mem_global_release(ref->object); | ||
27 | } | ||
28 | |||
29 | static int bochs_ttm_global_init(struct bochs_device *bochs) | ||
30 | { | ||
31 | struct drm_global_reference *global_ref; | ||
32 | int r; | ||
33 | |||
34 | global_ref = &bochs->ttm.mem_global_ref; | ||
35 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; | ||
36 | global_ref->size = sizeof(struct ttm_mem_global); | ||
37 | global_ref->init = &bochs_ttm_mem_global_init; | ||
38 | global_ref->release = &bochs_ttm_mem_global_release; | ||
39 | r = drm_global_item_ref(global_ref); | ||
40 | if (r != 0) { | ||
41 | DRM_ERROR("Failed setting up TTM memory accounting " | ||
42 | "subsystem.\n"); | ||
43 | return r; | ||
44 | } | ||
45 | |||
46 | bochs->ttm.bo_global_ref.mem_glob = | ||
47 | bochs->ttm.mem_global_ref.object; | ||
48 | global_ref = &bochs->ttm.bo_global_ref.ref; | ||
49 | global_ref->global_type = DRM_GLOBAL_TTM_BO; | ||
50 | global_ref->size = sizeof(struct ttm_bo_global); | ||
51 | global_ref->init = &ttm_bo_global_init; | ||
52 | global_ref->release = &ttm_bo_global_release; | ||
53 | r = drm_global_item_ref(global_ref); | ||
54 | if (r != 0) { | ||
55 | DRM_ERROR("Failed setting up TTM BO subsystem.\n"); | ||
56 | drm_global_item_unref(&bochs->ttm.mem_global_ref); | ||
57 | return r; | ||
58 | } | ||
59 | |||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | static void bochs_ttm_global_release(struct bochs_device *bochs) | ||
64 | { | ||
65 | if (bochs->ttm.mem_global_ref.release == NULL) | ||
66 | return; | ||
67 | |||
68 | drm_global_item_unref(&bochs->ttm.bo_global_ref.ref); | ||
69 | drm_global_item_unref(&bochs->ttm.mem_global_ref); | ||
70 | bochs->ttm.mem_global_ref.release = NULL; | ||
71 | } | ||
72 | |||
73 | |||
74 | static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo) | 19 | static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo) |
75 | { | 20 | { |
76 | struct bochs_bo *bo; | 21 | struct bochs_bo *bo; |
@@ -208,12 +153,7 @@ int bochs_mm_init(struct bochs_device *bochs) | |||
208 | struct ttm_bo_device *bdev = &bochs->ttm.bdev; | 153 | struct ttm_bo_device *bdev = &bochs->ttm.bdev; |
209 | int ret; | 154 | int ret; |
210 | 155 | ||
211 | ret = bochs_ttm_global_init(bochs); | ||
212 | if (ret) | ||
213 | return ret; | ||
214 | |||
215 | ret = ttm_bo_device_init(&bochs->ttm.bdev, | 156 | ret = ttm_bo_device_init(&bochs->ttm.bdev, |
216 | bochs->ttm.bo_global_ref.ref.object, | ||
217 | &bochs_bo_driver, | 157 | &bochs_bo_driver, |
218 | bochs->dev->anon_inode->i_mapping, | 158 | bochs->dev->anon_inode->i_mapping, |
219 | DRM_FILE_PAGE_OFFSET, | 159 | DRM_FILE_PAGE_OFFSET, |
@@ -240,7 +180,6 @@ void bochs_mm_fini(struct bochs_device *bochs) | |||
240 | return; | 180 | return; |
241 | 181 | ||
242 | ttm_bo_device_release(&bochs->ttm.bdev); | 182 | ttm_bo_device_release(&bochs->ttm.bdev); |
243 | bochs_ttm_global_release(bochs); | ||
244 | bochs->ttm.initialized = false; | 183 | bochs->ttm.initialized = false; |
245 | } | 184 | } |
246 | 185 | ||
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h index a29f87e98d9d..f2b2e0d169fa 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h +++ b/drivers/gpu/drm/cirrus/cirrus_drv.h | |||
@@ -136,8 +136,6 @@ struct cirrus_device { | |||
136 | int fb_mtrr; | 136 | int fb_mtrr; |
137 | 137 | ||
138 | struct { | 138 | struct { |
139 | struct drm_global_reference mem_global_ref; | ||
140 | struct ttm_bo_global_ref bo_global_ref; | ||
141 | struct ttm_bo_device bdev; | 139 | struct ttm_bo_device bdev; |
142 | } ttm; | 140 | } ttm; |
143 | bool mm_inited; | 141 | bool mm_inited; |
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index f21953243790..e075810b4bd4 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c | |||
@@ -36,63 +36,6 @@ cirrus_bdev(struct ttm_bo_device *bd) | |||
36 | return container_of(bd, struct cirrus_device, ttm.bdev); | 36 | return container_of(bd, struct cirrus_device, ttm.bdev); |
37 | } | 37 | } |
38 | 38 | ||
39 | static int | ||
40 | cirrus_ttm_mem_global_init(struct drm_global_reference *ref) | ||
41 | { | ||
42 | return ttm_mem_global_init(ref->object); | ||
43 | } | ||
44 | |||
45 | static void | ||
46 | cirrus_ttm_mem_global_release(struct drm_global_reference *ref) | ||
47 | { | ||
48 | ttm_mem_global_release(ref->object); | ||
49 | } | ||
50 | |||
51 | static int cirrus_ttm_global_init(struct cirrus_device *cirrus) | ||
52 | { | ||
53 | struct drm_global_reference *global_ref; | ||
54 | int r; | ||
55 | |||
56 | global_ref = &cirrus->ttm.mem_global_ref; | ||
57 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; | ||
58 | global_ref->size = sizeof(struct ttm_mem_global); | ||
59 | global_ref->init = &cirrus_ttm_mem_global_init; | ||
60 | global_ref->release = &cirrus_ttm_mem_global_release; | ||
61 | r = drm_global_item_ref(global_ref); | ||
62 | if (r != 0) { | ||
63 | DRM_ERROR("Failed setting up TTM memory accounting " | ||
64 | "subsystem.\n"); | ||
65 | return r; | ||
66 | } | ||
67 | |||
68 | cirrus->ttm.bo_global_ref.mem_glob = | ||
69 | cirrus->ttm.mem_global_ref.object; | ||
70 | global_ref = &cirrus->ttm.bo_global_ref.ref; | ||
71 | global_ref->global_type = DRM_GLOBAL_TTM_BO; | ||
72 | global_ref->size = sizeof(struct ttm_bo_global); | ||
73 | global_ref->init = &ttm_bo_global_init; | ||
74 | global_ref->release = &ttm_bo_global_release; | ||
75 | r = drm_global_item_ref(global_ref); | ||
76 | if (r != 0) { | ||
77 | DRM_ERROR("Failed setting up TTM BO subsystem.\n"); | ||
78 | drm_global_item_unref(&cirrus->ttm.mem_global_ref); | ||
79 | return r; | ||
80 | } | ||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | static void | ||
85 | cirrus_ttm_global_release(struct cirrus_device *cirrus) | ||
86 | { | ||
87 | if (cirrus->ttm.mem_global_ref.release == NULL) | ||
88 | return; | ||
89 | |||
90 | drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref); | ||
91 | drm_global_item_unref(&cirrus->ttm.mem_global_ref); | ||
92 | cirrus->ttm.mem_global_ref.release = NULL; | ||
93 | } | ||
94 | |||
95 | |||
96 | static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo) | 39 | static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo) |
97 | { | 40 | { |
98 | struct cirrus_bo *bo; | 41 | struct cirrus_bo *bo; |
@@ -232,12 +175,7 @@ int cirrus_mm_init(struct cirrus_device *cirrus) | |||
232 | struct drm_device *dev = cirrus->dev; | 175 | struct drm_device *dev = cirrus->dev; |
233 | struct ttm_bo_device *bdev = &cirrus->ttm.bdev; | 176 | struct ttm_bo_device *bdev = &cirrus->ttm.bdev; |
234 | 177 | ||
235 | ret = cirrus_ttm_global_init(cirrus); | ||
236 | if (ret) | ||
237 | return ret; | ||
238 | |||
239 | ret = ttm_bo_device_init(&cirrus->ttm.bdev, | 178 | ret = ttm_bo_device_init(&cirrus->ttm.bdev, |
240 | cirrus->ttm.bo_global_ref.ref.object, | ||
241 | &cirrus_bo_driver, | 179 | &cirrus_bo_driver, |
242 | dev->anon_inode->i_mapping, | 180 | dev->anon_inode->i_mapping, |
243 | DRM_FILE_PAGE_OFFSET, | 181 | DRM_FILE_PAGE_OFFSET, |
@@ -273,8 +211,6 @@ void cirrus_mm_fini(struct cirrus_device *cirrus) | |||
273 | 211 | ||
274 | ttm_bo_device_release(&cirrus->ttm.bdev); | 212 | ttm_bo_device_release(&cirrus->ttm.bdev); |
275 | 213 | ||
276 | cirrus_ttm_global_release(cirrus); | ||
277 | |||
278 | arch_phys_wc_del(cirrus->fb_mtrr); | 214 | arch_phys_wc_del(cirrus->fb_mtrr); |
279 | cirrus->fb_mtrr = 0; | 215 | cirrus->fb_mtrr = 0; |
280 | arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0), | 216 | arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0), |
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 7e23b150ca80..9ac26437051b 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c | |||
@@ -398,6 +398,11 @@ static int drm_atomic_connector_check(struct drm_connector *connector, | |||
398 | { | 398 | { |
399 | struct drm_crtc_state *crtc_state; | 399 | struct drm_crtc_state *crtc_state; |
400 | struct drm_writeback_job *writeback_job = state->writeback_job; | 400 | struct drm_writeback_job *writeback_job = state->writeback_job; |
401 | const struct drm_display_info *info = &connector->display_info; | ||
402 | |||
403 | state->max_bpc = info->bpc ? info->bpc : 8; | ||
404 | if (connector->max_bpc_property) | ||
405 | state->max_bpc = min(state->max_bpc, state->max_requested_bpc); | ||
401 | 406 | ||
402 | if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job) | 407 | if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job) |
403 | return 0; | 408 | return 0; |
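drm_atomic_connector_check() now derives an effective max_bpc on every atomic check: default to the sink's reported bpc (falling back to 8 when the display info gives none), then clamp to the userspace-requested "max bpc" when the property is attached. The same rule restated as a standalone function:

    #include <stdio.h>

    /* Userspace restatement of the clamp added above. */
    static unsigned int effective_max_bpc(unsigned int sink_bpc,
                                          int has_property,
                                          unsigned int requested)
    {
            unsigned int max_bpc = sink_bpc ? sink_bpc : 8;

            if (has_property && requested < max_bpc)
                    max_bpc = requested;
            return max_bpc;
    }

    int main(void)
    {
            printf("%u\n", effective_max_bpc(10, 1, 8));    /* property caps: 8 */
            printf("%u\n", effective_max_bpc(0, 0, 0));     /* no EDID bpc: 8 */
            return 0;
    }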
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index fe8dd8aa4ae4..bc9fc9665614 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
@@ -669,6 +669,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, | |||
669 | if (old_connector_state->link_status != | 669 | if (old_connector_state->link_status != |
670 | new_connector_state->link_status) | 670 | new_connector_state->link_status) |
671 | new_crtc_state->connectors_changed = true; | 671 | new_crtc_state->connectors_changed = true; |
672 | |||
673 | if (old_connector_state->max_requested_bpc != | ||
674 | new_connector_state->max_requested_bpc) | ||
675 | new_crtc_state->connectors_changed = true; | ||
672 | } | 676 | } |
673 | 677 | ||
674 | if (funcs->atomic_check) | 678 | if (funcs->atomic_check) |
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index d5b7f315098c..86ac33922b09 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c | |||
@@ -740,6 +740,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector, | |||
740 | 740 | ||
741 | return set_out_fence_for_connector(state->state, connector, | 741 | return set_out_fence_for_connector(state->state, connector, |
742 | fence_ptr); | 742 | fence_ptr); |
743 | } else if (property == connector->max_bpc_property) { | ||
744 | state->max_requested_bpc = val; | ||
743 | } else if (connector->funcs->atomic_set_property) { | 745 | } else if (connector->funcs->atomic_set_property) { |
744 | return connector->funcs->atomic_set_property(connector, | 746 | return connector->funcs->atomic_set_property(connector, |
745 | state, property, val); | 747 | state, property, val); |
@@ -804,6 +806,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector, | |||
804 | *val = 0; | 806 | *val = 0; |
805 | } else if (property == config->writeback_out_fence_ptr_property) { | 807 | } else if (property == config->writeback_out_fence_ptr_property) { |
806 | *val = 0; | 808 | *val = 0; |
809 | } else if (property == connector->max_bpc_property) { | ||
810 | *val = state->max_requested_bpc; | ||
807 | } else if (connector->funcs->atomic_get_property) { | 811 | } else if (connector->funcs->atomic_get_property) { |
808 | return connector->funcs->atomic_get_property(connector, | 812 | return connector->funcs->atomic_get_property(connector, |
809 | state, property, val); | 813 | state, property, val); |
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index aa18b1d7d3e4..fa9baacc863b 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c | |||
@@ -932,6 +932,13 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list) | |||
932 | * is no longer protected and userspace should take appropriate action | 932 | * is no longer protected and userspace should take appropriate action |
933 | * (whatever that might be). | 933 | * (whatever that might be). |
934 | * | 934 | * |
935 | * max bpc: | ||
936 | * This range property is used by userspace to limit the bit depth. When | ||
937 | * used, the driver limits the bpc in accordance with the valid range | ||
938 | * supported by the hardware and sink. Drivers should use the function | ||
939 | * drm_connector_attach_max_bpc_property() to create and attach the | ||
940 | * property to the connector during initialization. | ||
941 | * | ||
935 | * Connectors also have one standardized atomic property: | 942 | * Connectors also have one standardized atomic property: |
936 | * | 943 | * |
937 | * CRTC_ID: | 944 | * CRTC_ID: |
@@ -1600,6 +1607,40 @@ void drm_connector_set_link_status_property(struct drm_connector *connector, | |||
1600 | EXPORT_SYMBOL(drm_connector_set_link_status_property); | 1607 | EXPORT_SYMBOL(drm_connector_set_link_status_property); |
1601 | 1608 | ||
1602 | /** | 1609 | /** |
1610 | * drm_connector_attach_max_bpc_property - attach "max bpc" property | ||
1611 | * @connector: connector to attach max bpc property on. | ||
1612 | * @min: The minimum bit depth supported by the connector. | ||
1613 | * @max: The maximum bit depth supported by the connector. | ||
1614 | * | ||
1615 | * This is used to add support for limiting the bit depth on a connector. | ||
1616 | * | ||
1617 | * Returns: | ||
1618 | * Zero on success, negative errno on failure. | ||
1619 | */ | ||
1620 | int drm_connector_attach_max_bpc_property(struct drm_connector *connector, | ||
1621 | int min, int max) | ||
1622 | { | ||
1623 | struct drm_device *dev = connector->dev; | ||
1624 | struct drm_property *prop; | ||
1625 | |||
1626 | prop = connector->max_bpc_property; | ||
1627 | if (!prop) { | ||
1628 | prop = drm_property_create_range(dev, 0, "max bpc", min, max); | ||
1629 | if (!prop) | ||
1630 | return -ENOMEM; | ||
1631 | |||
1632 | connector->max_bpc_property = prop; | ||
1633 | } | ||
1634 | |||
1635 | drm_object_attach_property(&connector->base, prop, max); | ||
1636 | connector->state->max_requested_bpc = max; | ||
1637 | connector->state->max_bpc = max; | ||
1638 | |||
1639 | return 0; | ||
1640 | } | ||
1641 | EXPORT_SYMBOL(drm_connector_attach_max_bpc_property); | ||
1642 | |||
1643 | /** | ||
1603 | * drm_connector_init_panel_orientation_property - | 1644 | * drm_connector_init_panel_orientation_property - |
1604 | * initialize the connecters panel_orientation property | 1645 | * initialize the connecters panel_orientation property |
1605 | * @connector: connector for which to init the panel-orientation property. | 1646 | * @connector: connector for which to init the panel-orientation property. |
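Per the kernel-doc above, a driver opts in by attaching the property once at connector init time; the helper also seeds the connector state so the first atomic check sees sane values. A hypothetical call site, where the 8/12 bounds are example limits rather than anything mandated by the API:

    /* Hypothetical connector-init snippet using the new helper. */
    ret = drm_connector_attach_max_bpc_property(connector, 8, 12);
    if (ret)
            return ret;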
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 37c01b6076ec..6d483487f2b4 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c | |||
@@ -1352,3 +1352,93 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, | |||
1352 | return 0; | 1352 | return 0; |
1353 | } | 1353 | } |
1354 | EXPORT_SYMBOL(drm_dp_read_desc); | 1354 | EXPORT_SYMBOL(drm_dp_read_desc); |
1355 | |||
1356 | /** | ||
1357 | * DRM DP Helpers for DSC | ||
1358 | */ | ||
1359 | u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE], | ||
1360 | bool is_edp) | ||
1361 | { | ||
1362 | u8 slice_cap1 = dsc_dpcd[DP_DSC_SLICE_CAP_1 - DP_DSC_SUPPORT]; | ||
1363 | |||
1364 | if (is_edp) { | ||
1365 | /* For eDP, register DSC_SLICE_CAPABILITIES_1 gives slice count */ | ||
1366 | if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK) | ||
1367 | return 4; | ||
1368 | if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK) | ||
1369 | return 2; | ||
1370 | if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK) | ||
1371 | return 1; | ||
1372 | } else { | ||
1373 | /* For DP, use values from DSC_SLICE_CAP_1 and DSC_SLICE_CAP2 */ | ||
1374 | u8 slice_cap2 = dsc_dpcd[DP_DSC_SLICE_CAP_2 - DP_DSC_SUPPORT]; | ||
1375 | |||
1376 | if (slice_cap2 & DP_DSC_24_PER_DP_DSC_SINK) | ||
1377 | return 24; | ||
1378 | if (slice_cap2 & DP_DSC_20_PER_DP_DSC_SINK) | ||
1379 | return 20; | ||
1380 | if (slice_cap2 & DP_DSC_16_PER_DP_DSC_SINK) | ||
1381 | return 16; | ||
1382 | if (slice_cap1 & DP_DSC_12_PER_DP_DSC_SINK) | ||
1383 | return 12; | ||
1384 | if (slice_cap1 & DP_DSC_10_PER_DP_DSC_SINK) | ||
1385 | return 10; | ||
1386 | if (slice_cap1 & DP_DSC_8_PER_DP_DSC_SINK) | ||
1387 | return 8; | ||
1388 | if (slice_cap1 & DP_DSC_6_PER_DP_DSC_SINK) | ||
1389 | return 6; | ||
1390 | if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK) | ||
1391 | return 4; | ||
1392 | if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK) | ||
1393 | return 2; | ||
1394 | if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK) | ||
1395 | return 1; | ||
1396 | } | ||
1397 | |||
1398 | return 0; | ||
1399 | } | ||
1400 | EXPORT_SYMBOL(drm_dp_dsc_sink_max_slice_count); | ||
1401 | |||
1402 | u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) | ||
1403 | { | ||
1404 | u8 line_buf_depth = dsc_dpcd[DP_DSC_LINE_BUF_BIT_DEPTH - DP_DSC_SUPPORT]; | ||
1405 | |||
1406 | switch (line_buf_depth & DP_DSC_LINE_BUF_BIT_DEPTH_MASK) { | ||
1407 | case DP_DSC_LINE_BUF_BIT_DEPTH_9: | ||
1408 | return 9; | ||
1409 | case DP_DSC_LINE_BUF_BIT_DEPTH_10: | ||
1410 | return 10; | ||
1411 | case DP_DSC_LINE_BUF_BIT_DEPTH_11: | ||
1412 | return 11; | ||
1413 | case DP_DSC_LINE_BUF_BIT_DEPTH_12: | ||
1414 | return 12; | ||
1415 | case DP_DSC_LINE_BUF_BIT_DEPTH_13: | ||
1416 | return 13; | ||
1417 | case DP_DSC_LINE_BUF_BIT_DEPTH_14: | ||
1418 | return 14; | ||
1419 | case DP_DSC_LINE_BUF_BIT_DEPTH_15: | ||
1420 | return 15; | ||
1421 | case DP_DSC_LINE_BUF_BIT_DEPTH_16: | ||
1422 | return 16; | ||
1423 | case DP_DSC_LINE_BUF_BIT_DEPTH_8: | ||
1424 | return 8; | ||
1425 | } | ||
1426 | |||
1427 | return 0; | ||
1428 | } | ||
1429 | EXPORT_SYMBOL(drm_dp_dsc_sink_line_buf_depth); | ||
1430 | |||
1431 | u8 drm_dp_dsc_sink_max_color_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]) | ||
1432 | { | ||
1433 | u8 color_depth = dsc_dpcd[DP_DSC_DEC_COLOR_DEPTH_CAP - DP_DSC_SUPPORT]; | ||
1434 | |||
1435 | if (color_depth & DP_DSC_12_BPC) | ||
1436 | return 12; | ||
1437 | if (color_depth & DP_DSC_10_BPC) | ||
1438 | return 10; | ||
1439 | if (color_depth & DP_DSC_8_BPC) | ||
1440 | return 8; | ||
1441 | |||
1442 | return 0; | ||
1443 | } | ||
1444 | EXPORT_SYMBOL(drm_dp_dsc_sink_max_color_depth); | ||
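For orientation, a minimal sketch of how a source driver might combine the three new DSC helpers after reading the sink's capability block; the function name is hypothetical, and it assumes the caps were fetched starting at DP_DSC_SUPPORT.

	static int example_query_dsc_caps(struct drm_dp_aux *aux, bool is_edp)
	{
		u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
		u8 slices, line_buf_depth, bpc;

		if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
				     sizeof(dsc_dpcd)) < 0)
			return -EIO;

		slices = drm_dp_dsc_sink_max_slice_count(dsc_dpcd, is_edp);
		line_buf_depth = drm_dp_dsc_sink_line_buf_depth(dsc_dpcd);
		bpc = drm_dp_dsc_sink_max_color_depth(dsc_dpcd);

		/* Each helper returns 0 when the sink advertises nothing
		 * usable, so any zero here means DSC cannot be enabled
		 * with these caps.
		 */
		if (!slices || !line_buf_depth || !bpc)
			return -EINVAL;

		return 0;
	}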
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 8c3cfac437f4..529414556962 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c | |||
@@ -1275,6 +1275,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_ | |||
1275 | mutex_lock(&mgr->lock); | 1275 | mutex_lock(&mgr->lock); |
1276 | mstb = mgr->mst_primary; | 1276 | mstb = mgr->mst_primary; |
1277 | 1277 | ||
1278 | if (!mstb) | ||
1279 | goto out; | ||
1280 | |||
1278 | for (i = 0; i < lct - 1; i++) { | 1281 | for (i = 0; i < lct - 1; i++) { |
1279 | int shift = (i % 2) ? 0 : 4; | 1282 | int shift = (i % 2) ? 0 : 4; |
1280 | int port_num = (rad[i / 2] >> shift) & 0xf; | 1283 | int port_num = (rad[i / 2] >> shift) & 0xf; |
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 5f7e99bf4fa4..12e5e2be7890 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
@@ -960,14 +960,12 @@ static void drm_core_exit(void) | |||
960 | drm_sysfs_destroy(); | 960 | drm_sysfs_destroy(); |
961 | idr_destroy(&drm_minors_idr); | 961 | idr_destroy(&drm_minors_idr); |
962 | drm_connector_ida_destroy(); | 962 | drm_connector_ida_destroy(); |
963 | drm_global_release(); | ||
964 | } | 963 | } |
965 | 964 | ||
966 | static int __init drm_core_init(void) | 965 | static int __init drm_core_init(void) |
967 | { | 966 | { |
968 | int ret; | 967 | int ret; |
969 | 968 | ||
970 | drm_global_init(); | ||
971 | drm_connector_ida_init(); | 969 | drm_connector_ida_init(); |
972 | idr_init(&drm_minors_idr); | 970 | idr_init(&drm_minors_idr); |
973 | 971 | ||
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c deleted file mode 100644 index 5799e2782dd1..000000000000 --- a/drivers/gpu/drm/drm_global.c +++ /dev/null | |||
@@ -1,137 +0,0 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 OR MIT | ||
2 | /************************************************************************** | ||
3 | * | ||
4 | * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | /* | ||
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | ||
29 | */ | ||
30 | |||
31 | #include <linux/mutex.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <linux/module.h> | ||
34 | #include <drm/drm_global.h> | ||
35 | |||
36 | struct drm_global_item { | ||
37 | struct mutex mutex; | ||
38 | void *object; | ||
39 | int refcount; | ||
40 | }; | ||
41 | |||
42 | static struct drm_global_item glob[DRM_GLOBAL_NUM]; | ||
43 | |||
44 | void drm_global_init(void) | ||
45 | { | ||
46 | int i; | ||
47 | |||
48 | for (i = 0; i < DRM_GLOBAL_NUM; ++i) { | ||
49 | struct drm_global_item *item = &glob[i]; | ||
50 | mutex_init(&item->mutex); | ||
51 | item->object = NULL; | ||
52 | item->refcount = 0; | ||
53 | } | ||
54 | } | ||
55 | |||
56 | void drm_global_release(void) | ||
57 | { | ||
58 | int i; | ||
59 | for (i = 0; i < DRM_GLOBAL_NUM; ++i) { | ||
60 | struct drm_global_item *item = &glob[i]; | ||
61 | BUG_ON(item->object != NULL); | ||
62 | BUG_ON(item->refcount != 0); | ||
63 | } | ||
64 | } | ||
65 | |||
66 | /** | ||
67 | * drm_global_item_ref - Initialize and acquire reference to memory | ||
68 | * object | ||
69 | * @ref: Object for initialization | ||
70 | * | ||
71 | * This initializes a memory object, allocating memory and calling the | ||
72 | * .init() hook. Further calls will increase the reference count for | ||
73 | * that item. | ||
74 | * | ||
75 | * Returns: | ||
76 | * Zero on success, non-zero otherwise. | ||
77 | */ | ||
78 | int drm_global_item_ref(struct drm_global_reference *ref) | ||
79 | { | ||
80 | int ret = 0; | ||
81 | struct drm_global_item *item = &glob[ref->global_type]; | ||
82 | |||
83 | mutex_lock(&item->mutex); | ||
84 | if (item->refcount == 0) { | ||
85 | ref->object = kzalloc(ref->size, GFP_KERNEL); | ||
86 | if (unlikely(ref->object == NULL)) { | ||
87 | ret = -ENOMEM; | ||
88 | goto error_unlock; | ||
89 | } | ||
90 | ret = ref->init(ref); | ||
91 | if (unlikely(ret != 0)) | ||
92 | goto error_free; | ||
93 | |||
94 | item->object = ref->object; | ||
95 | } else { | ||
96 | ref->object = item->object; | ||
97 | } | ||
98 | |||
99 | ++item->refcount; | ||
100 | mutex_unlock(&item->mutex); | ||
101 | return 0; | ||
102 | |||
103 | error_free: | ||
104 | kfree(ref->object); | ||
105 | ref->object = NULL; | ||
106 | error_unlock: | ||
107 | mutex_unlock(&item->mutex); | ||
108 | return ret; | ||
109 | } | ||
110 | EXPORT_SYMBOL(drm_global_item_ref); | ||
111 | |||
112 | /** | ||
113 | * drm_global_item_unref - Drop reference to memory | ||
114 | * object | ||
115 | * @ref: Object being removed | ||
116 | * | ||
117 | * Drop a reference to the memory object and eventually call the | ||
118 | * release() hook. The allocated object should be dropped in the | ||
119 | * release() hook or before calling this function | ||
120 | * | ||
121 | */ | ||
122 | |||
123 | void drm_global_item_unref(struct drm_global_reference *ref) | ||
124 | { | ||
125 | struct drm_global_item *item = &glob[ref->global_type]; | ||
126 | |||
127 | mutex_lock(&item->mutex); | ||
128 | BUG_ON(item->refcount == 0); | ||
129 | BUG_ON(ref->object != item->object); | ||
130 | if (--item->refcount == 0) { | ||
131 | ref->release(ref); | ||
132 | item->object = NULL; | ||
133 | } | ||
134 | mutex_unlock(&item->mutex); | ||
135 | } | ||
136 | EXPORT_SYMBOL(drm_global_item_unref); | ||
137 | |||
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index e7c3ed6c9a2e..49a6763693f1 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c | |||
@@ -93,7 +93,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) | |||
93 | * If the GPU managed to complete this job's fence, the timeout is | 93 | * If the GPU managed to complete this job's fence, the timeout is |
94 | * spurious. Bail out. | 94 | * spurious. Bail out. |
95 | */ | 95 | */ |
96 | if (fence_completed(gpu, submit->out_fence->seqno)) | 96 | if (dma_fence_is_signaled(submit->out_fence)) |
97 | return; | 97 | return; |
98 | 98 | ||
99 | /* | 99 | /* |
@@ -105,8 +105,6 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) | |||
105 | change = dma_addr - gpu->hangcheck_dma_addr; | 105 | change = dma_addr - gpu->hangcheck_dma_addr; |
106 | if (change < 0 || change > 16) { | 106 | if (change < 0 || change > 16) { |
107 | gpu->hangcheck_dma_addr = dma_addr; | 107 | gpu->hangcheck_dma_addr = dma_addr; |
108 | schedule_delayed_work(&sched_job->sched->work_tdr, | ||
109 | sched_job->sched->timeout); | ||
110 | return; | 108 | return; |
111 | } | 109 | } |
112 | 110 | ||
@@ -127,6 +125,8 @@ static void etnaviv_sched_free_job(struct drm_sched_job *sched_job) | |||
127 | { | 125 | { |
128 | struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); | 126 | struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); |
129 | 127 | ||
128 | drm_sched_job_cleanup(sched_job); | ||
129 | |||
130 | etnaviv_submit_put(submit); | 130 | etnaviv_submit_put(submit); |
131 | } | 131 | } |
132 | 132 | ||
@@ -159,6 +159,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity, | |||
159 | submit->out_fence, 0, | 159 | submit->out_fence, 0, |
160 | INT_MAX, GFP_KERNEL); | 160 | INT_MAX, GFP_KERNEL); |
161 | if (submit->out_fence_id < 0) { | 161 | if (submit->out_fence_id < 0) { |
162 | drm_sched_job_cleanup(&submit->sched_job); | ||
162 | ret = -ENOMEM; | 163 | ret = -ENOMEM; |
163 | goto out_unlock; | 164 | goto out_unlock; |
164 | } | 165 | } |
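The two drm_sched_job_cleanup() calls added above follow a scheduler-wide rule introduced alongside this change: every job that passed drm_sched_job_init() must be cleaned up exactly once, on the normal free path and on a failed push alike. A generic sketch of the free path, with driver names as placeholders:

	static void example_free_job(struct drm_sched_job *sched_job)
	{
		struct example_job *job = to_example_job(sched_job);

		/* Release the scheduler's fences first ... */
		drm_sched_job_cleanup(sched_job);

		/* ... then drop the driver's own reference. */
		example_job_put(job);
	}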
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 94529aa82339..aef487dd8731 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c | |||
@@ -164,13 +164,6 @@ static u32 decon_get_frame_count(struct decon_context *ctx, bool end) | |||
164 | return frm; | 164 | return frm; |
165 | } | 165 | } |
166 | 166 | ||
167 | static u32 decon_get_vblank_counter(struct exynos_drm_crtc *crtc) | ||
168 | { | ||
169 | struct decon_context *ctx = crtc->ctx; | ||
170 | |||
171 | return decon_get_frame_count(ctx, false); | ||
172 | } | ||
173 | |||
174 | static void decon_setup_trigger(struct decon_context *ctx) | 167 | static void decon_setup_trigger(struct decon_context *ctx) |
175 | { | 168 | { |
176 | if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG)) | 169 | if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG)) |
@@ -536,7 +529,6 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = { | |||
536 | .disable = decon_disable, | 529 | .disable = decon_disable, |
537 | .enable_vblank = decon_enable_vblank, | 530 | .enable_vblank = decon_enable_vblank, |
538 | .disable_vblank = decon_disable_vblank, | 531 | .disable_vblank = decon_disable_vblank, |
539 | .get_vblank_counter = decon_get_vblank_counter, | ||
540 | .atomic_begin = decon_atomic_begin, | 532 | .atomic_begin = decon_atomic_begin, |
541 | .update_plane = decon_update_plane, | 533 | .update_plane = decon_update_plane, |
542 | .disable_plane = decon_disable_plane, | 534 | .disable_plane = decon_disable_plane, |
@@ -554,7 +546,6 @@ static int decon_bind(struct device *dev, struct device *master, void *data) | |||
554 | int ret; | 546 | int ret; |
555 | 547 | ||
556 | ctx->drm_dev = drm_dev; | 548 | ctx->drm_dev = drm_dev; |
557 | drm_dev->max_vblank_count = 0xffffffff; | ||
558 | 549 | ||
559 | for (win = ctx->first_win; win < WINDOWS_NR; win++) { | 550 | for (win = ctx->first_win; win < WINDOWS_NR; win++) { |
560 | ctx->configs[win].pixel_formats = decon_formats; | 551 | ctx->configs[win].pixel_formats = decon_formats; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index eea90251808f..2696289ecc78 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c | |||
@@ -162,16 +162,6 @@ static void exynos_drm_crtc_disable_vblank(struct drm_crtc *crtc) | |||
162 | exynos_crtc->ops->disable_vblank(exynos_crtc); | 162 | exynos_crtc->ops->disable_vblank(exynos_crtc); |
163 | } | 163 | } |
164 | 164 | ||
165 | static u32 exynos_drm_crtc_get_vblank_counter(struct drm_crtc *crtc) | ||
166 | { | ||
167 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); | ||
168 | |||
169 | if (exynos_crtc->ops->get_vblank_counter) | ||
170 | return exynos_crtc->ops->get_vblank_counter(exynos_crtc); | ||
171 | |||
172 | return 0; | ||
173 | } | ||
174 | |||
175 | static const struct drm_crtc_funcs exynos_crtc_funcs = { | 165 | static const struct drm_crtc_funcs exynos_crtc_funcs = { |
176 | .set_config = drm_atomic_helper_set_config, | 166 | .set_config = drm_atomic_helper_set_config, |
177 | .page_flip = drm_atomic_helper_page_flip, | 167 | .page_flip = drm_atomic_helper_page_flip, |
@@ -181,7 +171,6 @@ static const struct drm_crtc_funcs exynos_crtc_funcs = { | |||
181 | .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, | 171 | .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, |
182 | .enable_vblank = exynos_drm_crtc_enable_vblank, | 172 | .enable_vblank = exynos_drm_crtc_enable_vblank, |
183 | .disable_vblank = exynos_drm_crtc_disable_vblank, | 173 | .disable_vblank = exynos_drm_crtc_disable_vblank, |
184 | .get_vblank_counter = exynos_drm_crtc_get_vblank_counter, | ||
185 | }; | 174 | }; |
186 | 175 | ||
187 | struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, | 176 | struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index ec9604f1272b..5e61e707f955 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h | |||
@@ -135,7 +135,6 @@ struct exynos_drm_crtc_ops { | |||
135 | void (*disable)(struct exynos_drm_crtc *crtc); | 135 | void (*disable)(struct exynos_drm_crtc *crtc); |
136 | int (*enable_vblank)(struct exynos_drm_crtc *crtc); | 136 | int (*enable_vblank)(struct exynos_drm_crtc *crtc); |
137 | void (*disable_vblank)(struct exynos_drm_crtc *crtc); | 137 | void (*disable_vblank)(struct exynos_drm_crtc *crtc); |
138 | u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc); | ||
139 | enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc, | 138 | enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc, |
140 | const struct drm_display_mode *mode); | 139 | const struct drm_display_mode *mode); |
141 | bool (*mode_fixup)(struct exynos_drm_crtc *crtc, | 140 | bool (*mode_fixup)(struct exynos_drm_crtc *crtc, |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 07af7758066d..d81e62ae286a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #include <drm/drmP.h> | 15 | #include <drm/drmP.h> |
16 | #include <drm/drm_crtc_helper.h> | 16 | #include <drm/drm_crtc_helper.h> |
17 | #include <drm/drm_fb_helper.h> | ||
17 | #include <drm/drm_mipi_dsi.h> | 18 | #include <drm/drm_mipi_dsi.h> |
18 | #include <drm/drm_panel.h> | 19 | #include <drm/drm_panel.h> |
19 | #include <drm/drm_atomic_helper.h> | 20 | #include <drm/drm_atomic_helper.h> |
@@ -1474,12 +1475,12 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder) | |||
1474 | { | 1475 | { |
1475 | struct exynos_dsi *dsi = encoder_to_dsi(encoder); | 1476 | struct exynos_dsi *dsi = encoder_to_dsi(encoder); |
1476 | struct drm_connector *connector = &dsi->connector; | 1477 | struct drm_connector *connector = &dsi->connector; |
1478 | struct drm_device *drm = encoder->dev; | ||
1477 | int ret; | 1479 | int ret; |
1478 | 1480 | ||
1479 | connector->polled = DRM_CONNECTOR_POLL_HPD; | 1481 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
1480 | 1482 | ||
1481 | ret = drm_connector_init(encoder->dev, connector, | 1483 | ret = drm_connector_init(drm, connector, &exynos_dsi_connector_funcs, |
1482 | &exynos_dsi_connector_funcs, | ||
1483 | DRM_MODE_CONNECTOR_DSI); | 1484 | DRM_MODE_CONNECTOR_DSI); |
1484 | if (ret) { | 1485 | if (ret) { |
1485 | DRM_ERROR("Failed to initialize connector with drm\n"); | 1486 | DRM_ERROR("Failed to initialize connector with drm\n"); |
@@ -1489,7 +1490,12 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder) | |||
1489 | connector->status = connector_status_disconnected; | 1490 | connector->status = connector_status_disconnected; |
1490 | drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs); | 1491 | drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs); |
1491 | drm_connector_attach_encoder(connector, encoder); | 1492 | drm_connector_attach_encoder(connector, encoder); |
1493 | if (!drm->registered) | ||
1494 | return 0; | ||
1492 | 1495 | ||
1496 | connector->funcs->reset(connector); | ||
1497 | drm_fb_helper_add_one_connector(drm->fb_helper, connector); | ||
1498 | drm_connector_register(connector); | ||
1493 | return 0; | 1499 | return 0; |
1494 | } | 1500 | } |
1495 | 1501 | ||
@@ -1527,7 +1533,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host, | |||
1527 | } | 1533 | } |
1528 | 1534 | ||
1529 | dsi->panel = of_drm_find_panel(device->dev.of_node); | 1535 | dsi->panel = of_drm_find_panel(device->dev.of_node); |
1530 | if (dsi->panel) { | 1536 | if (IS_ERR(dsi->panel)) { |
1537 | dsi->panel = NULL; | ||
1538 | } else { | ||
1531 | drm_panel_attach(dsi->panel, &dsi->connector); | 1539 | drm_panel_attach(dsi->panel, &dsi->connector); |
1532 | dsi->connector.status = connector_status_connected; | 1540 | dsi->connector.status = connector_status_connected; |
1533 | } | 1541 | } |
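The panel-lookup hunk above adapts to of_drm_find_panel() now returning an ERR_PTR (for example -EPROBE_DEFER or -ENODEV) instead of NULL. A condensed sketch of the pattern, with the panel-less fallback mirroring the exynos policy:

	struct drm_panel *panel = of_drm_find_panel(np);

	if (IS_ERR(panel)) {
		panel = NULL;	/* no panel bound (yet): run without one */
	} else {
		drm_panel_attach(panel, connector);
		connector->status = connector_status_connected;
	}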
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 918dd2c82209..01d182289efa 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c | |||
@@ -192,7 +192,7 @@ int exynos_drm_fbdev_init(struct drm_device *dev) | |||
192 | struct drm_fb_helper *helper; | 192 | struct drm_fb_helper *helper; |
193 | int ret; | 193 | int ret; |
194 | 194 | ||
195 | if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector) | 195 | if (!dev->mode_config.num_crtc) |
196 | return 0; | 196 | return 0; |
197 | 197 | ||
198 | fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); | 198 | fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL); |
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h index 45c25a488f42..3c168ae77b0c 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h | |||
@@ -49,8 +49,6 @@ struct hibmc_drm_private { | |||
49 | bool mode_config_initialized; | 49 | bool mode_config_initialized; |
50 | 50 | ||
51 | /* ttm */ | 51 | /* ttm */ |
52 | struct drm_global_reference mem_global_ref; | ||
53 | struct ttm_bo_global_ref bo_global_ref; | ||
54 | struct ttm_bo_device bdev; | 52 | struct ttm_bo_device bdev; |
55 | bool initialized; | 53 | bool initialized; |
56 | 54 | ||
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c index 2e3e0bdb8932..dd383267884c 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c | |||
@@ -29,55 +29,6 @@ hibmc_bdev(struct ttm_bo_device *bd) | |||
29 | return container_of(bd, struct hibmc_drm_private, bdev); | 29 | return container_of(bd, struct hibmc_drm_private, bdev); |
30 | } | 30 | } |
31 | 31 | ||
32 | static int | ||
33 | hibmc_ttm_mem_global_init(struct drm_global_reference *ref) | ||
34 | { | ||
35 | return ttm_mem_global_init(ref->object); | ||
36 | } | ||
37 | |||
38 | static void | ||
39 | hibmc_ttm_mem_global_release(struct drm_global_reference *ref) | ||
40 | { | ||
41 | ttm_mem_global_release(ref->object); | ||
42 | } | ||
43 | |||
44 | static int hibmc_ttm_global_init(struct hibmc_drm_private *hibmc) | ||
45 | { | ||
46 | int ret; | ||
47 | |||
48 | hibmc->mem_global_ref.global_type = DRM_GLOBAL_TTM_MEM; | ||
49 | hibmc->mem_global_ref.size = sizeof(struct ttm_mem_global); | ||
50 | hibmc->mem_global_ref.init = &hibmc_ttm_mem_global_init; | ||
51 | hibmc->mem_global_ref.release = &hibmc_ttm_mem_global_release; | ||
52 | ret = drm_global_item_ref(&hibmc->mem_global_ref); | ||
53 | if (ret) { | ||
54 | DRM_ERROR("could not get ref on ttm global: %d\n", ret); | ||
55 | return ret; | ||
56 | } | ||
57 | |||
58 | hibmc->bo_global_ref.mem_glob = | ||
59 | hibmc->mem_global_ref.object; | ||
60 | hibmc->bo_global_ref.ref.global_type = DRM_GLOBAL_TTM_BO; | ||
61 | hibmc->bo_global_ref.ref.size = sizeof(struct ttm_bo_global); | ||
62 | hibmc->bo_global_ref.ref.init = &ttm_bo_global_init; | ||
63 | hibmc->bo_global_ref.ref.release = &ttm_bo_global_release; | ||
64 | ret = drm_global_item_ref(&hibmc->bo_global_ref.ref); | ||
65 | if (ret) { | ||
66 | DRM_ERROR("failed setting up TTM BO subsystem: %d\n", ret); | ||
67 | drm_global_item_unref(&hibmc->mem_global_ref); | ||
68 | return ret; | ||
69 | } | ||
70 | return 0; | ||
71 | } | ||
72 | |||
73 | static void | ||
74 | hibmc_ttm_global_release(struct hibmc_drm_private *hibmc) | ||
75 | { | ||
76 | drm_global_item_unref(&hibmc->bo_global_ref.ref); | ||
77 | drm_global_item_unref(&hibmc->mem_global_ref); | ||
78 | hibmc->mem_global_ref.release = NULL; | ||
79 | } | ||
80 | |||
81 | static void hibmc_bo_ttm_destroy(struct ttm_buffer_object *tbo) | 32 | static void hibmc_bo_ttm_destroy(struct ttm_buffer_object *tbo) |
82 | { | 33 | { |
83 | struct hibmc_bo *bo = container_of(tbo, struct hibmc_bo, bo); | 34 | struct hibmc_bo *bo = container_of(tbo, struct hibmc_bo, bo); |
@@ -237,18 +188,12 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc) | |||
237 | struct drm_device *dev = hibmc->dev; | 188 | struct drm_device *dev = hibmc->dev; |
238 | struct ttm_bo_device *bdev = &hibmc->bdev; | 189 | struct ttm_bo_device *bdev = &hibmc->bdev; |
239 | 190 | ||
240 | ret = hibmc_ttm_global_init(hibmc); | ||
241 | if (ret) | ||
242 | return ret; | ||
243 | |||
244 | ret = ttm_bo_device_init(&hibmc->bdev, | 191 | ret = ttm_bo_device_init(&hibmc->bdev, |
245 | hibmc->bo_global_ref.ref.object, | ||
246 | &hibmc_bo_driver, | 192 | &hibmc_bo_driver, |
247 | dev->anon_inode->i_mapping, | 193 | dev->anon_inode->i_mapping, |
248 | DRM_FILE_PAGE_OFFSET, | 194 | DRM_FILE_PAGE_OFFSET, |
249 | true); | 195 | true); |
250 | if (ret) { | 196 | if (ret) { |
251 | hibmc_ttm_global_release(hibmc); | ||
252 | DRM_ERROR("error initializing bo driver: %d\n", ret); | 197 | DRM_ERROR("error initializing bo driver: %d\n", ret); |
253 | return ret; | 198 | return ret; |
254 | } | 199 | } |
@@ -256,7 +201,6 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc) | |||
256 | ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, | 201 | ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, |
257 | hibmc->fb_size >> PAGE_SHIFT); | 202 | hibmc->fb_size >> PAGE_SHIFT); |
258 | if (ret) { | 203 | if (ret) { |
259 | hibmc_ttm_global_release(hibmc); | ||
260 | DRM_ERROR("failed ttm VRAM init: %d\n", ret); | 204 | DRM_ERROR("failed ttm VRAM init: %d\n", ret); |
261 | return ret; | 205 | return ret; |
262 | } | 206 | } |
@@ -271,7 +215,6 @@ void hibmc_mm_fini(struct hibmc_drm_private *hibmc) | |||
271 | return; | 215 | return; |
272 | 216 | ||
273 | ttm_bo_device_release(&hibmc->bdev); | 217 | ttm_bo_device_release(&hibmc->bdev); |
274 | hibmc_ttm_global_release(hibmc); | ||
275 | hibmc->mm_inited = false; | 218 | hibmc->mm_inited = false; |
276 | } | 219 | } |
277 | 220 | ||
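The hibmc hunks show the tree-wide shape of the drm_global removal: TTM now manages its global state internally, so the per-driver mem/bo global refs and their error unwinding disappear, and ttm_bo_device_init() is called without a ttm_bo_global pointer. Reduced to a sketch (fields as in the hunk above):

	/* Post-conversion init path: no *_global_init()/_release() pairs. */
	ret = ttm_bo_device_init(&hibmc->bdev, &hibmc_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET, true);
	if (ret) {
		DRM_ERROR("error initializing bo driver: %d\n", ret);
		return ret;
	}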
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 1c2857f13ad4..0ff878c994e2 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile | |||
@@ -75,6 +75,7 @@ i915-y += i915_cmd_parser.o \ | |||
75 | i915_gemfs.o \ | 75 | i915_gemfs.o \ |
76 | i915_query.o \ | 76 | i915_query.o \ |
77 | i915_request.o \ | 77 | i915_request.o \ |
78 | i915_scheduler.o \ | ||
78 | i915_timeline.o \ | 79 | i915_timeline.o \ |
79 | i915_trace_points.o \ | 80 | i915_trace_points.o \ |
80 | i915_vma.o \ | 81 | i915_vma.o \ |
@@ -112,6 +113,8 @@ i915-y += intel_audio.o \ | |||
112 | intel_bios.o \ | 113 | intel_bios.o \ |
113 | intel_cdclk.o \ | 114 | intel_cdclk.o \ |
114 | intel_color.o \ | 115 | intel_color.o \ |
116 | intel_combo_phy.o \ | ||
117 | intel_connector.o \ | ||
115 | intel_display.o \ | 118 | intel_display.o \ |
116 | intel_dpio_phy.o \ | 119 | intel_dpio_phy.o \ |
117 | intel_dpll_mgr.o \ | 120 | intel_dpll_mgr.o \ |
@@ -120,9 +123,9 @@ i915-y += intel_audio.o \ | |||
120 | intel_frontbuffer.o \ | 123 | intel_frontbuffer.o \ |
121 | intel_hdcp.o \ | 124 | intel_hdcp.o \ |
122 | intel_hotplug.o \ | 125 | intel_hotplug.o \ |
123 | intel_modes.o \ | ||
124 | intel_overlay.o \ | 126 | intel_overlay.o \ |
125 | intel_psr.o \ | 127 | intel_psr.o \ |
128 | intel_quirks.o \ | ||
126 | intel_sideband.o \ | 129 | intel_sideband.o \ |
127 | intel_sprite.o | 130 | intel_sprite.o |
128 | i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o | 131 | i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o |
@@ -142,6 +145,7 @@ i915-y += dvo_ch7017.o \ | |||
142 | intel_dp_link_training.o \ | 145 | intel_dp_link_training.o \ |
143 | intel_dp_mst.o \ | 146 | intel_dp_mst.o \ |
144 | intel_dp.o \ | 147 | intel_dp.o \ |
148 | intel_dsi.o \ | ||
145 | intel_dsi_dcs_backlight.o \ | 149 | intel_dsi_dcs_backlight.o \ |
146 | intel_dsi_vbt.o \ | 150 | intel_dsi_vbt.o \ |
147 | intel_dvo.o \ | 151 | intel_dvo.o \ |
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 2402395a068d..58e166effa45 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c | |||
@@ -1905,7 +1905,6 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu) | |||
1905 | vgpu_free_mm(mm); | 1905 | vgpu_free_mm(mm); |
1906 | return ERR_PTR(-ENOMEM); | 1906 | return ERR_PTR(-ENOMEM); |
1907 | } | 1907 | } |
1908 | mm->ggtt_mm.last_partial_off = -1UL; | ||
1909 | 1908 | ||
1910 | return mm; | 1909 | return mm; |
1911 | } | 1910 | } |
@@ -1930,7 +1929,6 @@ void _intel_vgpu_mm_release(struct kref *mm_ref) | |||
1930 | invalidate_ppgtt_mm(mm); | 1929 | invalidate_ppgtt_mm(mm); |
1931 | } else { | 1930 | } else { |
1932 | vfree(mm->ggtt_mm.virtual_ggtt); | 1931 | vfree(mm->ggtt_mm.virtual_ggtt); |
1933 | mm->ggtt_mm.last_partial_off = -1UL; | ||
1934 | } | 1932 | } |
1935 | 1933 | ||
1936 | vgpu_free_mm(mm); | 1934 | vgpu_free_mm(mm); |
@@ -2168,6 +2166,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, | |||
2168 | struct intel_gvt_gtt_entry e, m; | 2166 | struct intel_gvt_gtt_entry e, m; |
2169 | dma_addr_t dma_addr; | 2167 | dma_addr_t dma_addr; |
2170 | int ret; | 2168 | int ret; |
2169 | struct intel_gvt_partial_pte *partial_pte, *pos, *n; | ||
2170 | bool partial_update = false; | ||
2171 | 2171 | ||
2172 | if (bytes != 4 && bytes != 8) | 2172 | if (bytes != 4 && bytes != 8) |
2173 | return -EINVAL; | 2173 | return -EINVAL; |
@@ -2178,68 +2178,57 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, | |||
2178 | if (!vgpu_gmadr_is_valid(vgpu, gma)) | 2178 | if (!vgpu_gmadr_is_valid(vgpu, gma)) |
2179 | return 0; | 2179 | return 0; |
2180 | 2180 | ||
2181 | ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index); | 2181 | e.type = GTT_TYPE_GGTT_PTE; |
2182 | |||
2183 | memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, | 2182 | memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, |
2184 | bytes); | 2183 | bytes); |
2185 | 2184 | ||
2186 | /* If ggtt entry size is 8 bytes, and it's split into two 4 bytes | 2185 | /* If ggtt entry size is 8 bytes, and it's split into two 4 bytes |
2187 | * write, we assume the two 4 bytes writes are consecutive. | 2186 | * write, save the first 4 bytes in a list and update virtual |
2188 | * Otherwise, we abort and report error | 2187 | * PTE. Only update shadow PTE when the second 4 bytes comes. |
2189 | */ | 2188 | */ |
2190 | if (bytes < info->gtt_entry_size) { | 2189 | if (bytes < info->gtt_entry_size) { |
2191 | if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) { | 2190 | bool found = false; |
2192 | /* the first partial part*/ | 2191 | |
2193 | ggtt_mm->ggtt_mm.last_partial_off = off; | 2192 | list_for_each_entry_safe(pos, n, |
2194 | ggtt_mm->ggtt_mm.last_partial_data = e.val64; | 2193 | &ggtt_mm->ggtt_mm.partial_pte_list, list) { |
2195 | return 0; | 2194 | if (g_gtt_index == pos->offset >> |
2196 | } else if ((g_gtt_index == | 2195 | info->gtt_entry_size_shift) { |
2197 | (ggtt_mm->ggtt_mm.last_partial_off >> | 2196 | if (off != pos->offset) { |
2198 | info->gtt_entry_size_shift)) && | 2197 | /* the second partial part*/ |
2199 | (off != ggtt_mm->ggtt_mm.last_partial_off)) { | 2198 | int last_off = pos->offset & |
2200 | /* the second partial part */ | 2199 | (info->gtt_entry_size - 1); |
2201 | 2200 | ||
2202 | int last_off = ggtt_mm->ggtt_mm.last_partial_off & | 2201 | memcpy((void *)&e.val64 + last_off, |
2203 | (info->gtt_entry_size - 1); | 2202 | (void *)&pos->data + last_off, |
2204 | 2203 | bytes); | |
2205 | memcpy((void *)&e.val64 + last_off, | 2204 | |
2206 | (void *)&ggtt_mm->ggtt_mm.last_partial_data + | 2205 | list_del(&pos->list); |
2207 | last_off, bytes); | 2206 | kfree(pos); |
2208 | 2207 | found = true; | |
2209 | ggtt_mm->ggtt_mm.last_partial_off = -1UL; | 2208 | break; |
2210 | } else { | 2209 | } |
2211 | int last_offset; | 2210 | |
2212 | 2211 | /* update of the first partial part */ | |
2213 | gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n", | 2212 | pos->data = e.val64; |
2214 | ggtt_mm->ggtt_mm.last_partial_off, off, | 2213 | ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index); |
2215 | bytes, info->gtt_entry_size); | 2214 | return 0; |
2216 | 2215 | } | |
2217 | /* set host ggtt entry to scratch page and clear | 2216 | } |
2218 | * virtual ggtt entry as not present for last | ||
2219 | * partially write offset | ||
2220 | */ | ||
2221 | last_offset = ggtt_mm->ggtt_mm.last_partial_off & | ||
2222 | (~(info->gtt_entry_size - 1)); | ||
2223 | |||
2224 | ggtt_get_host_entry(ggtt_mm, &m, last_offset); | ||
2225 | ggtt_invalidate_pte(vgpu, &m); | ||
2226 | ops->set_pfn(&m, gvt->gtt.scratch_mfn); | ||
2227 | ops->clear_present(&m); | ||
2228 | ggtt_set_host_entry(ggtt_mm, &m, last_offset); | ||
2229 | ggtt_invalidate(gvt->dev_priv); | ||
2230 | |||
2231 | ggtt_get_guest_entry(ggtt_mm, &e, last_offset); | ||
2232 | ops->clear_present(&e); | ||
2233 | ggtt_set_guest_entry(ggtt_mm, &e, last_offset); | ||
2234 | |||
2235 | ggtt_mm->ggtt_mm.last_partial_off = off; | ||
2236 | ggtt_mm->ggtt_mm.last_partial_data = e.val64; | ||
2237 | 2217 | ||
2238 | return 0; | 2218 | if (!found) { |
2219 | /* the first partial part */ | ||
2220 | partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL); | ||
2221 | if (!partial_pte) | ||
2222 | return -ENOMEM; | ||
2223 | partial_pte->offset = off; | ||
2224 | partial_pte->data = e.val64; | ||
2225 | list_add_tail(&partial_pte->list, | ||
2226 | &ggtt_mm->ggtt_mm.partial_pte_list); | ||
2227 | partial_update = true; | ||
2239 | } | 2228 | } |
2240 | } | 2229 | } |
2241 | 2230 | ||
2242 | if (ops->test_present(&e)) { | 2231 | if (!partial_update && (ops->test_present(&e))) { |
2243 | gfn = ops->get_pfn(&e); | 2232 | gfn = ops->get_pfn(&e); |
2244 | m = e; | 2233 | m = e; |
2245 | 2234 | ||
@@ -2263,16 +2252,18 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, | |||
2263 | } else | 2252 | } else |
2264 | ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); | 2253 | ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); |
2265 | } else { | 2254 | } else { |
2266 | ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index); | ||
2267 | ggtt_invalidate_pte(vgpu, &m); | ||
2268 | ops->set_pfn(&m, gvt->gtt.scratch_mfn); | 2255 | ops->set_pfn(&m, gvt->gtt.scratch_mfn); |
2269 | ops->clear_present(&m); | 2256 | ops->clear_present(&m); |
2270 | } | 2257 | } |
2271 | 2258 | ||
2272 | out: | 2259 | out: |
2260 | ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index); | ||
2261 | |||
2262 | ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index); | ||
2263 | ggtt_invalidate_pte(vgpu, &e); | ||
2264 | |||
2273 | ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); | 2265 | ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); |
2274 | ggtt_invalidate(gvt->dev_priv); | 2266 | ggtt_invalidate(gvt->dev_priv); |
2275 | ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index); | ||
2276 | return 0; | 2267 | return 0; |
2277 | } | 2268 | } |
2278 | 2269 | ||
@@ -2430,6 +2421,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) | |||
2430 | 2421 | ||
2431 | intel_vgpu_reset_ggtt(vgpu, false); | 2422 | intel_vgpu_reset_ggtt(vgpu, false); |
2432 | 2423 | ||
2424 | INIT_LIST_HEAD(>t->ggtt_mm->ggtt_mm.partial_pte_list); | ||
2425 | |||
2433 | return create_scratch_page_tree(vgpu); | 2426 | return create_scratch_page_tree(vgpu); |
2434 | } | 2427 | } |
2435 | 2428 | ||
@@ -2454,6 +2447,14 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu) | |||
2454 | 2447 | ||
2455 | static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu) | 2448 | static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu) |
2456 | { | 2449 | { |
2450 | struct intel_gvt_partial_pte *pos; | ||
2451 | |||
2452 | list_for_each_entry(pos, | ||
2453 | &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list, list) { | ||
2454 | gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n", | ||
2455 | pos->offset, pos->data); | ||
2456 | kfree(pos); | ||
2457 | } | ||
2457 | intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm); | 2458 | intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm); |
2458 | vgpu->gtt.ggtt_mm = NULL; | 2459 | vgpu->gtt.ggtt_mm = NULL; |
2459 | } | 2460 | } |
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 7a9b36176efb..d8cb04cc946d 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h | |||
@@ -35,7 +35,6 @@ | |||
35 | #define _GVT_GTT_H_ | 35 | #define _GVT_GTT_H_ |
36 | 36 | ||
37 | #define I915_GTT_PAGE_SHIFT 12 | 37 | #define I915_GTT_PAGE_SHIFT 12 |
38 | #define I915_GTT_PAGE_MASK (~(I915_GTT_PAGE_SIZE - 1)) | ||
39 | 38 | ||
40 | struct intel_vgpu_mm; | 39 | struct intel_vgpu_mm; |
41 | 40 | ||
@@ -133,6 +132,12 @@ enum intel_gvt_mm_type { | |||
133 | 132 | ||
134 | #define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES | 133 | #define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES |
135 | 134 | ||
135 | struct intel_gvt_partial_pte { | ||
136 | unsigned long offset; | ||
137 | u64 data; | ||
138 | struct list_head list; | ||
139 | }; | ||
140 | |||
136 | struct intel_vgpu_mm { | 141 | struct intel_vgpu_mm { |
137 | enum intel_gvt_mm_type type; | 142 | enum intel_gvt_mm_type type; |
138 | struct intel_vgpu *vgpu; | 143 | struct intel_vgpu *vgpu; |
@@ -157,8 +162,7 @@ struct intel_vgpu_mm { | |||
157 | } ppgtt_mm; | 162 | } ppgtt_mm; |
158 | struct { | 163 | struct { |
159 | void *virtual_ggtt; | 164 | void *virtual_ggtt; |
160 | unsigned long last_partial_off; | 165 | struct list_head partial_pte_list; |
161 | u64 last_partial_data; | ||
162 | } ggtt_mm; | 166 | } ggtt_mm; |
163 | }; | 167 | }; |
164 | }; | 168 | }; |
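A worked example of the bookkeeping that partial_pte_list replaces last_partial_off/last_partial_data with, using illustrative offsets; a 64-bit GGTT PTE reached via MMIO offset 0x1000 is updated with two 4-byte writes:

	/* write #1: off = 0x1000, bytes = 4
	 *   -> no list entry matches this g_gtt_index, so an
	 *      intel_gvt_partial_pte with offset 0x1000 and the half-updated
	 *      value is queued; only the virtual PTE is updated.
	 * write #2: off = 0x1004, bytes = 4
	 *   -> same g_gtt_index (0x1004 >> 3 == 0x1000 >> 3) but a different
	 *      offset, so last_off = 0x1000 & 7 = 0 and the low dword saved
	 *      in pos->data is merged into e.val64; the entry is freed and
	 *      the completed 64-bit PTE is shadowed.
	 *
	 * Unlike the old single-slot scheme, several PTEs can be half-written
	 * concurrently, each tracked by its own list node.
	 */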
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 90f50f67909a..aa280bb07125 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
@@ -1609,7 +1609,7 @@ static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu, | |||
1609 | return 0; | 1609 | return 0; |
1610 | } | 1610 | } |
1611 | 1611 | ||
1612 | static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu, | 1612 | static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu, |
1613 | unsigned int offset, void *p_data, unsigned int bytes) | 1613 | unsigned int offset, void *p_data, unsigned int bytes) |
1614 | { | 1614 | { |
1615 | vgpu_vreg(vgpu, offset) = 0; | 1615 | vgpu_vreg(vgpu, offset) = 0; |
@@ -2607,6 +2607,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) | |||
2607 | MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2607 | MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
2608 | MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2608 | MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
2609 | MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2609 | MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
2610 | |||
2611 | MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write); | ||
2612 | MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write); | ||
2610 | return 0; | 2613 | return 0; |
2611 | } | 2614 | } |
2612 | 2615 | ||
@@ -3205,9 +3208,6 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) | |||
3205 | MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); | 3208 | MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); |
3206 | MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT); | 3209 | MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT); |
3207 | 3210 | ||
3208 | MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write); | ||
3209 | MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write); | ||
3210 | |||
3211 | MMIO_D(RC6_CTX_BASE, D_BXT); | 3211 | MMIO_D(RC6_CTX_BASE, D_BXT); |
3212 | 3212 | ||
3213 | MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT); | 3213 | MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT); |
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 10e63eea5492..36a5147cd01e 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c | |||
@@ -131,7 +131,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = { | |||
131 | {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */ | 131 | {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */ |
132 | 132 | ||
133 | {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ | 133 | {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ |
134 | {RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */ | 134 | {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */ |
135 | 135 | ||
136 | {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ | 136 | {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ |
137 | {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ | 137 | {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ |
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index ea34003d6dd2..b8fbe3fabea3 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c | |||
@@ -334,6 +334,28 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) | |||
334 | i915_gem_object_put(wa_ctx->indirect_ctx.obj); | 334 | i915_gem_object_put(wa_ctx->indirect_ctx.obj); |
335 | } | 335 | } |
336 | 336 | ||
337 | static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, | ||
338 | struct i915_gem_context *ctx) | ||
339 | { | ||
340 | struct intel_vgpu_mm *mm = workload->shadow_mm; | ||
341 | struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; | ||
342 | int i = 0; | ||
343 | |||
344 | if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed) | ||
345 | return -1; | ||
346 | |||
347 | if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { | ||
348 | px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0]; | ||
349 | } else { | ||
350 | for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) { | ||
351 | px_dma(ppgtt->pdp.page_directory[i]) = | ||
352 | mm->ppgtt_mm.shadow_pdps[i]; | ||
353 | } | ||
354 | } | ||
355 | |||
356 | return 0; | ||
357 | } | ||
358 | |||
337 | /** | 359 | /** |
338 | * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and | 360 | * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and |
339 | * shadow it as well, including ringbuffer, wa_ctx and ctx. | 361 | * shadow it as well, including ringbuffer, wa_ctx and ctx. |
@@ -358,6 +380,12 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) | |||
358 | if (workload->req) | 380 | if (workload->req) |
359 | return 0; | 381 | return 0; |
360 | 382 | ||
383 | ret = set_context_ppgtt_from_shadow(workload, shadow_ctx); | ||
384 | if (ret < 0) { | ||
385 | gvt_vgpu_err("workload shadow ppgtt isn't ready\n"); | ||
386 | return ret; | ||
387 | } | ||
388 | |||
361 | /* pin shadow context by gvt even the shadow context will be pinned | 389 | /* pin shadow context by gvt even the shadow context will be pinned |
362 | * when i915 alloc request. That is because gvt will update the guest | 390 | * when i915 alloc request. That is because gvt will update the guest |
363 | * context from shadow context when workload is completed, and at that | 391 | * context from shadow context when workload is completed, and at that |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 4f3ac0a12889..7f455bca528e 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -1788,6 +1788,8 @@ static int i915_emon_status(struct seq_file *m, void *unused) | |||
1788 | if (!IS_GEN5(dev_priv)) | 1788 | if (!IS_GEN5(dev_priv)) |
1789 | return -ENODEV; | 1789 | return -ENODEV; |
1790 | 1790 | ||
1791 | intel_runtime_pm_get(dev_priv); | ||
1792 | |||
1791 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 1793 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
1792 | if (ret) | 1794 | if (ret) |
1793 | return ret; | 1795 | return ret; |
@@ -1802,6 +1804,8 @@ static int i915_emon_status(struct seq_file *m, void *unused) | |||
1802 | seq_printf(m, "GFX power: %ld\n", gfx); | 1804 | seq_printf(m, "GFX power: %ld\n", gfx); |
1803 | seq_printf(m, "Total power: %ld\n", chipset + gfx); | 1805 | seq_printf(m, "Total power: %ld\n", chipset + gfx); |
1804 | 1806 | ||
1807 | intel_runtime_pm_put(dev_priv); | ||
1808 | |||
1805 | return 0; | 1809 | return 0; |
1806 | } | 1810 | } |
1807 | 1811 | ||
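Several debugfs hunks in this patch apply the same rule: a path that touches hardware registers must hold a runtime-PM wakeref for its entire duration, including every error exit (hence the new out: label in i915_drop_caches_set() below). The skeleton, with the device access as a placeholder:

	intel_runtime_pm_get(i915);

	ret = do_work_that_touches_hardware(i915);	/* placeholder */

	/* Always drop the wakeref on the way out, error or not. */
	intel_runtime_pm_put(i915);
	return ret;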
@@ -2215,8 +2219,23 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) | |||
2215 | struct drm_i915_private *dev_priv = node_to_i915(m->private); | 2219 | struct drm_i915_private *dev_priv = node_to_i915(m->private); |
2216 | struct drm_device *dev = &dev_priv->drm; | 2220 | struct drm_device *dev = &dev_priv->drm; |
2217 | struct intel_rps *rps = &dev_priv->gt_pm.rps; | 2221 | struct intel_rps *rps = &dev_priv->gt_pm.rps; |
2222 | u32 act_freq = rps->cur_freq; | ||
2218 | struct drm_file *file; | 2223 | struct drm_file *file; |
2219 | 2224 | ||
2225 | if (intel_runtime_pm_get_if_in_use(dev_priv)) { | ||
2226 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | ||
2227 | mutex_lock(&dev_priv->pcu_lock); | ||
2228 | act_freq = vlv_punit_read(dev_priv, | ||
2229 | PUNIT_REG_GPU_FREQ_STS); | ||
2230 | act_freq = (act_freq >> 8) & 0xff; | ||
2231 | mutex_unlock(&dev_priv->pcu_lock); | ||
2232 | } else { | ||
2233 | act_freq = intel_get_cagf(dev_priv, | ||
2234 | I915_READ(GEN6_RPSTAT1)); | ||
2235 | } | ||
2236 | intel_runtime_pm_put(dev_priv); | ||
2237 | } | ||
2238 | |||
2220 | seq_printf(m, "RPS enabled? %d\n", rps->enabled); | 2239 | seq_printf(m, "RPS enabled? %d\n", rps->enabled); |
2221 | seq_printf(m, "GPU busy? %s [%d requests]\n", | 2240 | seq_printf(m, "GPU busy? %s [%d requests]\n", |
2222 | yesno(dev_priv->gt.awake), dev_priv->gt.active_requests); | 2241 | yesno(dev_priv->gt.awake), dev_priv->gt.active_requests); |
@@ -2224,8 +2243,9 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) | |||
2224 | seq_printf(m, "Boosts outstanding? %d\n", | 2243 | seq_printf(m, "Boosts outstanding? %d\n", |
2225 | atomic_read(&rps->num_waiters)); | 2244 | atomic_read(&rps->num_waiters)); |
2226 | seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive)); | 2245 | seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive)); |
2227 | seq_printf(m, "Frequency requested %d\n", | 2246 | seq_printf(m, "Frequency requested %d, actual %d\n", |
2228 | intel_gpu_freq(dev_priv, rps->cur_freq)); | 2247 | intel_gpu_freq(dev_priv, rps->cur_freq), |
2248 | intel_gpu_freq(dev_priv, act_freq)); | ||
2229 | seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", | 2249 | seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", |
2230 | intel_gpu_freq(dev_priv, rps->min_freq), | 2250 | intel_gpu_freq(dev_priv, rps->min_freq), |
2231 | intel_gpu_freq(dev_priv, rps->min_freq_softlimit), | 2251 | intel_gpu_freq(dev_priv, rps->min_freq_softlimit), |
@@ -2900,16 +2920,15 @@ static int i915_dmc_info(struct seq_file *m, void *unused) | |||
2900 | seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), | 2920 | seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version), |
2901 | CSR_VERSION_MINOR(csr->version)); | 2921 | CSR_VERSION_MINOR(csr->version)); |
2902 | 2922 | ||
2903 | if (IS_KABYLAKE(dev_priv) || | 2923 | if (WARN_ON(INTEL_GEN(dev_priv) > 11)) |
2904 | (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) { | 2924 | goto out; |
2905 | seq_printf(m, "DC3 -> DC5 count: %d\n", | 2925 | |
2906 | I915_READ(SKL_CSR_DC3_DC5_COUNT)); | 2926 | seq_printf(m, "DC3 -> DC5 count: %d\n", |
2927 | I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT : | ||
2928 | SKL_CSR_DC3_DC5_COUNT)); | ||
2929 | if (!IS_GEN9_LP(dev_priv)) | ||
2907 | seq_printf(m, "DC5 -> DC6 count: %d\n", | 2930 | seq_printf(m, "DC5 -> DC6 count: %d\n", |
2908 | I915_READ(SKL_CSR_DC5_DC6_COUNT)); | 2931 | I915_READ(SKL_CSR_DC5_DC6_COUNT)); |
2909 | } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) { | ||
2910 | seq_printf(m, "DC3 -> DC5 count: %d\n", | ||
2911 | I915_READ(BXT_CSR_DC3_DC5_COUNT)); | ||
2912 | } | ||
2913 | 2932 | ||
2914 | out: | 2933 | out: |
2915 | seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0))); | 2934 | seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0))); |
@@ -3049,16 +3068,17 @@ static void intel_connector_info(struct seq_file *m, | |||
3049 | seq_printf(m, "connector %d: type %s, status: %s\n", | 3068 | seq_printf(m, "connector %d: type %s, status: %s\n", |
3050 | connector->base.id, connector->name, | 3069 | connector->base.id, connector->name, |
3051 | drm_get_connector_status_name(connector->status)); | 3070 | drm_get_connector_status_name(connector->status)); |
3052 | if (connector->status == connector_status_connected) { | 3071 | |
3053 | seq_printf(m, "\tname: %s\n", connector->display_info.name); | 3072 | if (connector->status == connector_status_disconnected) |
3054 | seq_printf(m, "\tphysical dimensions: %dx%dmm\n", | 3073 | return; |
3055 | connector->display_info.width_mm, | 3074 | |
3056 | connector->display_info.height_mm); | 3075 | seq_printf(m, "\tname: %s\n", connector->display_info.name); |
3057 | seq_printf(m, "\tsubpixel order: %s\n", | 3076 | seq_printf(m, "\tphysical dimensions: %dx%dmm\n", |
3058 | drm_get_subpixel_order_name(connector->display_info.subpixel_order)); | 3077 | connector->display_info.width_mm, |
3059 | seq_printf(m, "\tCEA rev: %d\n", | 3078 | connector->display_info.height_mm); |
3060 | connector->display_info.cea_rev); | 3079 | seq_printf(m, "\tsubpixel order: %s\n", |
3061 | } | 3080 | drm_get_subpixel_order_name(connector->display_info.subpixel_order)); |
3081 | seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev); | ||
3062 | 3082 | ||
3063 | if (!intel_encoder) | 3083 | if (!intel_encoder) |
3064 | return; | 3084 | return; |
@@ -4172,6 +4192,7 @@ i915_drop_caches_set(void *data, u64 val) | |||
4172 | 4192 | ||
4173 | DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n", | 4193 | DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n", |
4174 | val, val & DROP_ALL); | 4194 | val, val & DROP_ALL); |
4195 | intel_runtime_pm_get(i915); | ||
4175 | 4196 | ||
4176 | if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915)) | 4197 | if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915)) |
4177 | i915_gem_set_wedged(i915); | 4198 | i915_gem_set_wedged(i915); |
@@ -4181,7 +4202,7 @@ i915_drop_caches_set(void *data, u64 val) | |||
4181 | if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) { | 4202 | if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) { |
4182 | ret = mutex_lock_interruptible(&i915->drm.struct_mutex); | 4203 | ret = mutex_lock_interruptible(&i915->drm.struct_mutex); |
4183 | if (ret) | 4204 | if (ret) |
4184 | return ret; | 4205 | goto out; |
4185 | 4206 | ||
4186 | if (val & DROP_ACTIVE) | 4207 | if (val & DROP_ACTIVE) |
4187 | ret = i915_gem_wait_for_idle(i915, | 4208 | ret = i915_gem_wait_for_idle(i915, |
@@ -4189,11 +4210,8 @@ i915_drop_caches_set(void *data, u64 val) | |||
4189 | I915_WAIT_LOCKED, | 4210 | I915_WAIT_LOCKED, |
4190 | MAX_SCHEDULE_TIMEOUT); | 4211 | MAX_SCHEDULE_TIMEOUT); |
4191 | 4212 | ||
4192 | if (ret == 0 && val & DROP_RESET_SEQNO) { | 4213 | if (ret == 0 && val & DROP_RESET_SEQNO) |
4193 | intel_runtime_pm_get(i915); | ||
4194 | ret = i915_gem_set_global_seqno(&i915->drm, 1); | 4214 | ret = i915_gem_set_global_seqno(&i915->drm, 1); |
4195 | intel_runtime_pm_put(i915); | ||
4196 | } | ||
4197 | 4215 | ||
4198 | if (val & DROP_RETIRE) | 4216 | if (val & DROP_RETIRE) |
4199 | i915_retire_requests(i915); | 4217 | i915_retire_requests(i915); |
@@ -4231,6 +4249,9 @@ i915_drop_caches_set(void *data, u64 val) | |||
4231 | if (val & DROP_FREED) | 4249 | if (val & DROP_FREED) |
4232 | i915_gem_drain_freed_objects(i915); | 4250 | i915_gem_drain_freed_objects(i915); |
4233 | 4251 | ||
4252 | out: | ||
4253 | intel_runtime_pm_put(i915); | ||
4254 | |||
4234 | return ret; | 4255 | return ret; |
4235 | } | 4256 | } |
4236 | 4257 | ||
@@ -4331,7 +4352,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv, | |||
4331 | for (s = 0; s < info->sseu.max_slices; s++) { | 4352 | for (s = 0; s < info->sseu.max_slices; s++) { |
4332 | /* | 4353 | /* |
4333 | * FIXME: Valid SS Mask respects the spec and read | 4354 | * FIXME: Valid SS Mask respects the spec and read |
4334 | * only valid bits for those registers, excluding reserverd | 4355 | * only valid bits for those registers, excluding reserved |
4335 | * although this seems wrong because it would leave many | 4356 | * although this seems wrong because it would leave many |
4336 | * subslices without ACK. | 4357 | * subslices without ACK. |
4337 | */ | 4358 | */ |
@@ -4641,24 +4662,122 @@ static const struct file_operations i915_hpd_storm_ctl_fops = { | |||
4641 | .write = i915_hpd_storm_ctl_write | 4662 | .write = i915_hpd_storm_ctl_write |
4642 | }; | 4663 | }; |
4643 | 4664 | ||
4665 | static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data) | ||
4666 | { | ||
4667 | struct drm_i915_private *dev_priv = m->private; | ||
4668 | |||
4669 | seq_printf(m, "Enabled: %s\n", | ||
4670 | yesno(dev_priv->hotplug.hpd_short_storm_enabled)); | ||
4671 | |||
4672 | return 0; | ||
4673 | } | ||
4674 | |||
4675 | static int | ||
4676 | i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file) | ||
4677 | { | ||
4678 | return single_open(file, i915_hpd_short_storm_ctl_show, | ||
4679 | inode->i_private); | ||
4680 | } | ||
4681 | |||
4682 | static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, | ||
4683 | const char __user *ubuf, | ||
4684 | size_t len, loff_t *offp) | ||
4685 | { | ||
4686 | struct seq_file *m = file->private_data; | ||
4687 | struct drm_i915_private *dev_priv = m->private; | ||
4688 | struct i915_hotplug *hotplug = &dev_priv->hotplug; | ||
4689 | char *newline; | ||
4690 | char tmp[16]; | ||
4691 | int i; | ||
4692 | bool new_state; | ||
4693 | |||
4694 | if (len >= sizeof(tmp)) | ||
4695 | return -EINVAL; | ||
4696 | |||
4697 | if (copy_from_user(tmp, ubuf, len)) | ||
4698 | return -EFAULT; | ||
4699 | |||
4700 | tmp[len] = '\0'; | ||
4701 | |||
4702 | /* Strip newline, if any */ | ||
4703 | newline = strchr(tmp, '\n'); | ||
4704 | if (newline) | ||
4705 | *newline = '\0'; | ||
4706 | |||
4707 | /* Reset to the "default" state for this system */ | ||
4708 | if (strcmp(tmp, "reset") == 0) | ||
4709 | new_state = !HAS_DP_MST(dev_priv); | ||
4710 | else if (kstrtobool(tmp, &new_state) != 0) | ||
4711 | return -EINVAL; | ||
4712 | |||
4713 | DRM_DEBUG_KMS("%sabling HPD short storm detection\n", | ||
4714 | new_state ? "En" : "Dis"); | ||
4715 | |||
4716 | spin_lock_irq(&dev_priv->irq_lock); | ||
4717 | hotplug->hpd_short_storm_enabled = new_state; | ||
4718 | /* Reset the HPD storm stats so we don't accidentally trigger a storm */ | ||
4719 | for_each_hpd_pin(i) | ||
4720 | hotplug->stats[i].count = 0; | ||
4721 | spin_unlock_irq(&dev_priv->irq_lock); | ||
4722 | |||
4723 | /* Re-enable hpd immediately if we were in an irq storm */ | ||
4724 | flush_delayed_work(&dev_priv->hotplug.reenable_work); | ||
4725 | |||
4726 | return len; | ||
4727 | } | ||
4728 | |||
4729 | static const struct file_operations i915_hpd_short_storm_ctl_fops = { | ||
4730 | .owner = THIS_MODULE, | ||
4731 | .open = i915_hpd_short_storm_ctl_open, | ||
4732 | .read = seq_read, | ||
4733 | .llseek = seq_lseek, | ||
4734 | .release = single_release, | ||
4735 | .write = i915_hpd_short_storm_ctl_write, | ||
4736 | }; | ||
4737 | |||
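The write handler above accepts either the literal "reset" (restoring the platform default, here !HAS_DP_MST()) or any kstrtobool()-compatible token. A generic sketch of that parse step, with a hypothetical helper name:

	static int example_parse_bool_or_reset(const char *s, bool def, bool *out)
	{
		if (strcmp(s, "reset") == 0) {
			*out = def;		/* platform default */
			return 0;
		}
		/* kstrtobool() accepts "0"/"1", "y"/"n", "on"/"off", ... */
		return kstrtobool(s, out);	/* -EINVAL otherwise */
	}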
4644 | static int i915_drrs_ctl_set(void *data, u64 val) | 4738 | static int i915_drrs_ctl_set(void *data, u64 val) |
4645 | { | 4739 | { |
4646 | struct drm_i915_private *dev_priv = data; | 4740 | struct drm_i915_private *dev_priv = data; |
4647 | struct drm_device *dev = &dev_priv->drm; | 4741 | struct drm_device *dev = &dev_priv->drm; |
4648 | struct intel_crtc *intel_crtc; | 4742 | struct intel_crtc *crtc; |
4649 | struct intel_encoder *encoder; | ||
4650 | struct intel_dp *intel_dp; | ||
4651 | 4743 | ||
4652 | if (INTEL_GEN(dev_priv) < 7) | 4744 | if (INTEL_GEN(dev_priv) < 7) |
4653 | return -ENODEV; | 4745 | return -ENODEV; |
4654 | 4746 | ||
4655 | drm_modeset_lock_all(dev); | 4747 | for_each_intel_crtc(dev, crtc) { |
4656 | for_each_intel_crtc(dev, intel_crtc) { | 4748 | struct drm_connector_list_iter conn_iter; |
4657 | if (!intel_crtc->base.state->active || | 4749 | struct intel_crtc_state *crtc_state; |
4658 | !intel_crtc->config->has_drrs) | 4750 | struct drm_connector *connector; |
4659 | continue; | 4751 | struct drm_crtc_commit *commit; |
4752 | int ret; | ||
4753 | |||
4754 | ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex); | ||
4755 | if (ret) | ||
4756 | return ret; | ||
4757 | |||
4758 | crtc_state = to_intel_crtc_state(crtc->base.state); | ||
4759 | |||
4760 | if (!crtc_state->base.active || | ||
4761 | !crtc_state->has_drrs) | ||
4762 | goto out; | ||
4660 | 4763 | ||
4661 | for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) { | 4764 | commit = crtc_state->base.commit; |
4765 | if (commit) { | ||
4766 | ret = wait_for_completion_interruptible(&commit->hw_done); | ||
4767 | if (ret) | ||
4768 | goto out; | ||
4769 | } | ||
4770 | |||
4771 | drm_connector_list_iter_begin(dev, &conn_iter); | ||
4772 | drm_for_each_connector_iter(connector, &conn_iter) { | ||
4773 | struct intel_encoder *encoder; | ||
4774 | struct intel_dp *intel_dp; | ||
4775 | |||
4776 | if (!(crtc_state->base.connector_mask & | ||
4777 | drm_connector_mask(connector))) | ||
4778 | continue; | ||
4779 | |||
4780 | encoder = intel_attached_encoder(connector); | ||
4662 | if (encoder->type != INTEL_OUTPUT_EDP) | 4781 | if (encoder->type != INTEL_OUTPUT_EDP) |
4663 | continue; | 4782 | continue; |
4664 | 4783 | ||
@@ -4668,13 +4787,18 @@ static int i915_drrs_ctl_set(void *data, u64 val) | |||
4668 | intel_dp = enc_to_intel_dp(&encoder->base); | 4787 | intel_dp = enc_to_intel_dp(&encoder->base); |
4669 | if (val) | 4788 | if (val) |
4670 | intel_edp_drrs_enable(intel_dp, | 4789 | intel_edp_drrs_enable(intel_dp, |
4671 | intel_crtc->config); | 4790 | crtc_state); |
4672 | else | 4791 | else |
4673 | intel_edp_drrs_disable(intel_dp, | 4792 | intel_edp_drrs_disable(intel_dp, |
4674 | intel_crtc->config); | 4793 | crtc_state); |
4675 | } | 4794 | } |
4795 | drm_connector_list_iter_end(&conn_iter); | ||
4796 | |||
4797 | out: | ||
4798 | drm_modeset_unlock(&crtc->base.mutex); | ||
4799 | if (ret) | ||
4800 | return ret; | ||
4676 | } | 4801 | } |
4677 | drm_modeset_unlock_all(dev); | ||
4678 | 4802 | ||
4679 | return 0; | 4803 | return 0; |
4680 | } | 4804 | } |
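The rewrite above drops drm_modeset_lock_all() in favour of taking each CRTC's mutex individually (and interruptibly) and flushing any in-flight commit through its hw_done completion before trusting crtc->base.state. A minimal sketch of that pattern, with walk_crtcs() as a hypothetical caller:

static int walk_crtcs(struct drm_device *dev)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc) {
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		commit = crtc->base.state->commit;
		if (commit) {
			/* Wait until the HW state matches crtc->base.state. */
			ret = wait_for_completion_interruptible(&commit->hw_done);
		}

		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}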
@@ -4818,6 +4942,7 @@ static const struct i915_debugfs_files { | |||
4818 | {"i915_guc_log_level", &i915_guc_log_level_fops}, | 4942 | {"i915_guc_log_level", &i915_guc_log_level_fops}, |
4819 | {"i915_guc_log_relay", &i915_guc_log_relay_fops}, | 4943 | {"i915_guc_log_relay", &i915_guc_log_relay_fops}, |
4820 | {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}, | 4944 | {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}, |
4945 | {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops}, | ||
4821 | {"i915_ipc_status", &i915_ipc_status_fops}, | 4946 | {"i915_ipc_status", &i915_ipc_status_fops}, |
4822 | {"i915_drrs_ctl", &i915_drrs_ctl_fops}, | 4947 | {"i915_drrs_ctl", &i915_drrs_ctl_fops}, |
4823 | {"i915_edp_psr_debug", &i915_edp_psr_debug_fops} | 4948 | {"i915_edp_psr_debug", &i915_edp_psr_debug_fops} |
@@ -4899,13 +5024,10 @@ static int i915_dpcd_show(struct seq_file *m, void *data) | |||
4899 | continue; | 5024 | continue; |
4900 | 5025 | ||
4901 | err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size); | 5026 | err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size); |
4902 | if (err <= 0) { | 5027 | if (err < 0) |
4903 | DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n", | 5028 | seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err); |
4904 | size, b->offset, err); | 5029 | else |
4905 | continue; | 5030 | seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf); |
4906 | } | ||
4907 | |||
4908 | seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf); | ||
4909 | } | 5031 | } |
4910 | 5032 | ||
4911 | return 0; | 5033 | return 0; |
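The reworked loop above leans on the return convention of drm_dp_dpcd_read(): a non-negative count of bytes actually transferred (which may be short) or a negative errno. That is why the hex dump now prints err bytes rather than the requested size, and why errors are reported inline instead of skipped. Condensed into a sketch (dump_dpcd_range() is a hypothetical helper name):

static void dump_dpcd_range(struct seq_file *m, struct drm_dp_aux *aux,
			    unsigned int offset, size_t size)
{
	u8 buf[16];
	ssize_t err;

	if (WARN_ON(size > sizeof(buf)))
		return;

	err = drm_dp_dpcd_read(aux, offset, buf, size);
	if (err < 0)
		seq_printf(m, "%04x: ERROR %d\n", offset, (int)err);
	else
		seq_printf(m, "%04x: %*ph\n", offset, (int)err, buf);
}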
@@ -4934,6 +5056,28 @@ static int i915_panel_show(struct seq_file *m, void *data) | |||
4934 | } | 5056 | } |
4935 | DEFINE_SHOW_ATTRIBUTE(i915_panel); | 5057 | DEFINE_SHOW_ATTRIBUTE(i915_panel); |
4936 | 5058 | ||
5059 | static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data) | ||
5060 | { | ||
5061 | struct drm_connector *connector = m->private; | ||
5062 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
5063 | |||
5064 | if (connector->status != connector_status_connected) | ||
5065 | return -ENODEV; | ||
5066 | |||
5067 | /* HDCP is supported by the connector only if a shim is registered */ | ||
5068 | if (!intel_connector->hdcp.shim) | ||
5069 | return -EINVAL; | ||
5070 | |||
5071 | seq_printf(m, "%s:%d HDCP version: ", connector->name, | ||
5072 | connector->base.id); | ||
5073 | seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ? | ||
5074 | "None" : "HDCP1.4"); | ||
5075 | seq_puts(m, "\n"); | ||
5076 | |||
5077 | return 0; | ||
5078 | } | ||
5079 | DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability); | ||
5080 | |||
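DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates the single_open() boilerplate for a read-only seq_file. For the new show function it expands to roughly:

static int i915_hdcp_sink_capability_open(struct inode *inode,
					  struct file *file)
{
	return single_open(file, i915_hdcp_sink_capability_show,
			   inode->i_private);
}

static const struct file_operations i915_hdcp_sink_capability_fops = {
	.owner		= THIS_MODULE,
	.open		= i915_hdcp_sink_capability_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};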
4937 | /** | 5081 | /** |
4938 | * i915_debugfs_connector_add - add i915 specific connector debugfs files | 5082 | * i915_debugfs_connector_add - add i915 specific connector debugfs files |
4939 | * @connector: pointer to a registered drm_connector | 5083 | * @connector: pointer to a registered drm_connector |
@@ -4963,5 +5107,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector) | |||
4963 | connector, &i915_psr_sink_status_fops); | 5107 | connector, &i915_psr_sink_status_fops); |
4964 | } | 5108 | } |
4965 | 5109 | ||
5110 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || | ||
5111 | connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || | ||
5112 | connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) { | ||
5113 | debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root, | ||
5114 | connector, &i915_hdcp_sink_capability_fops); | ||
5115 | } | ||
5116 | |||
4966 | return 0; | 5117 | return 0; |
4967 | } | 5118 | } |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 44e2c0f5ec50..b1d23c73c147 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -345,7 +345,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, | |||
345 | value = HAS_WT(dev_priv); | 345 | value = HAS_WT(dev_priv); |
346 | break; | 346 | break; |
347 | case I915_PARAM_HAS_ALIASING_PPGTT: | 347 | case I915_PARAM_HAS_ALIASING_PPGTT: |
348 | value = USES_PPGTT(dev_priv); | 348 | value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL); |
349 | break; | 349 | break; |
350 | case I915_PARAM_HAS_SEMAPHORES: | 350 | case I915_PARAM_HAS_SEMAPHORES: |
351 | value = HAS_LEGACY_SEMAPHORES(dev_priv); | 351 | value = HAS_LEGACY_SEMAPHORES(dev_priv); |
@@ -645,6 +645,13 @@ static int i915_load_modeset_init(struct drm_device *dev) | |||
645 | if (i915_inject_load_failure()) | 645 | if (i915_inject_load_failure()) |
646 | return -ENODEV; | 646 | return -ENODEV; |
647 | 647 | ||
648 | if (INTEL_INFO(dev_priv)->num_pipes) { | ||
649 | ret = drm_vblank_init(&dev_priv->drm, | ||
650 | INTEL_INFO(dev_priv)->num_pipes); | ||
651 | if (ret) | ||
652 | goto out; | ||
653 | } | ||
654 | |||
648 | intel_bios_init(dev_priv); | 655 | intel_bios_init(dev_priv); |
649 | 656 | ||
650 | /* If we have > 1 VGA cards, then we need to arbitrate access | 657 | /* If we have > 1 VGA cards, then we need to arbitrate access |
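This hunk moves drm_vblank_init() from i915_driver_load() (the matching removal appears further down) to the top of i915_load_modeset_init(), so vblank bookkeeping exists before IRQ installation and modeset init can raise events. The call shape, as a sketch with modeset_init_prologue() as a hypothetical name:

static int modeset_init_prologue(struct drm_device *drm,
				 unsigned int num_pipes)
{
	int ret = 0;

	/* Allocates one struct drm_vblank_crtc per pipe; skip if headless. */
	if (num_pipes)
		ret = drm_vblank_init(drm, num_pipes);

	return ret;
}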
@@ -687,7 +694,7 @@ static int i915_load_modeset_init(struct drm_device *dev) | |||
687 | if (ret) | 694 | if (ret) |
688 | goto cleanup_modeset; | 695 | goto cleanup_modeset; |
689 | 696 | ||
690 | intel_setup_overlay(dev_priv); | 697 | intel_overlay_setup(dev_priv); |
691 | 698 | ||
692 | if (INTEL_INFO(dev_priv)->num_pipes == 0) | 699 | if (INTEL_INFO(dev_priv)->num_pipes == 0) |
693 | return 0; | 700 | return 0; |
@@ -699,6 +706,8 @@ static int i915_load_modeset_init(struct drm_device *dev) | |||
699 | /* Only enable hotplug handling once the fbdev is fully set up. */ | 706 | /* Only enable hotplug handling once the fbdev is fully set up. */ |
700 | intel_hpd_init(dev_priv); | 707 | intel_hpd_init(dev_priv); |
701 | 708 | ||
709 | intel_init_ipc(dev_priv); | ||
710 | |||
702 | return 0; | 711 | return 0; |
703 | 712 | ||
704 | cleanup_gem: | 713 | cleanup_gem: |
@@ -1030,6 +1039,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) | |||
1030 | 1039 | ||
1031 | err_uncore: | 1040 | err_uncore: |
1032 | intel_uncore_fini(dev_priv); | 1041 | intel_uncore_fini(dev_priv); |
1042 | i915_mmio_cleanup(dev_priv); | ||
1033 | err_bridge: | 1043 | err_bridge: |
1034 | pci_dev_put(dev_priv->bridge_dev); | 1044 | pci_dev_put(dev_priv->bridge_dev); |
1035 | 1045 | ||
@@ -1049,17 +1059,6 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) | |||
1049 | 1059 | ||
1050 | static void intel_sanitize_options(struct drm_i915_private *dev_priv) | 1060 | static void intel_sanitize_options(struct drm_i915_private *dev_priv) |
1051 | { | 1061 | { |
1052 | /* | ||
1053 | * i915.enable_ppgtt is read-only, so do an early pass to validate the | ||
1054 | * user's requested state against the hardware/driver capabilities. We | ||
1055 | * do this now so that we can print out any log messages once rather | ||
1056 | * than every time we check intel_enable_ppgtt(). | ||
1057 | */ | ||
1058 | i915_modparams.enable_ppgtt = | ||
1059 | intel_sanitize_enable_ppgtt(dev_priv, | ||
1060 | i915_modparams.enable_ppgtt); | ||
1061 | DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt); | ||
1062 | |||
1063 | intel_gvt_sanitize_options(dev_priv); | 1062 | intel_gvt_sanitize_options(dev_priv); |
1064 | } | 1063 | } |
1065 | 1064 | ||
@@ -1175,8 +1174,6 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv) | |||
1175 | return -EINVAL; | 1174 | return -EINVAL; |
1176 | } | 1175 | } |
1177 | 1176 | ||
1178 | dram_info->valid_dimm = true; | ||
1179 | |||
1180 | /* | 1177 | /* |
1181 | * If any of the channel is single rank channel, worst case output | 1178 | * If any of the channel is single rank channel, worst case output |
1182 | * will be same as if single rank memory, so consider single rank | 1179 | * will be same as if single rank memory, so consider single rank |
@@ -1193,8 +1190,7 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv) | |||
1193 | return -EINVAL; | 1190 | return -EINVAL; |
1194 | } | 1191 | } |
1195 | 1192 | ||
1196 | if (ch0.is_16gb_dimm || ch1.is_16gb_dimm) | 1193 | dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm; |
1197 | dram_info->is_16gb_dimm = true; | ||
1198 | 1194 | ||
1199 | dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0, | 1195 | dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0, |
1200 | val_ch1, | 1196 | val_ch1, |
@@ -1314,7 +1310,6 @@ bxt_get_dram_info(struct drm_i915_private *dev_priv) | |||
1314 | return -EINVAL; | 1310 | return -EINVAL; |
1315 | } | 1311 | } |
1316 | 1312 | ||
1317 | dram_info->valid_dimm = true; | ||
1318 | dram_info->valid = true; | 1313 | dram_info->valid = true; |
1319 | return 0; | 1314 | return 0; |
1320 | } | 1315 | } |
@@ -1327,19 +1322,24 @@ intel_get_dram_info(struct drm_i915_private *dev_priv) | |||
1327 | int ret; | 1322 | int ret; |
1328 | 1323 | ||
1329 | dram_info->valid = false; | 1324 | dram_info->valid = false; |
1330 | dram_info->valid_dimm = false; | ||
1331 | dram_info->is_16gb_dimm = false; | ||
1332 | dram_info->rank = I915_DRAM_RANK_INVALID; | 1325 | dram_info->rank = I915_DRAM_RANK_INVALID; |
1333 | dram_info->bandwidth_kbps = 0; | 1326 | dram_info->bandwidth_kbps = 0; |
1334 | dram_info->num_channels = 0; | 1327 | dram_info->num_channels = 0; |
1335 | 1328 | ||
1329 | /* | ||
1330 | * Assume 16Gb DIMMs are present until proven otherwise. | ||
1331 | * This is only used for the level 0 watermark latency | ||
1332 | * w/a which does not apply to bxt/glk. | ||
1333 | */ | ||
1334 | dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv); | ||
1335 | |||
1336 | if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv)) | 1336 | if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv)) |
1337 | return; | 1337 | return; |
1338 | 1338 | ||
1339 | /* Need to calculate bandwidth only for Gen9 */ | 1339 | /* Need to calculate bandwidth only for Gen9 */ |
1340 | if (IS_BROXTON(dev_priv)) | 1340 | if (IS_BROXTON(dev_priv)) |
1341 | ret = bxt_get_dram_info(dev_priv); | 1341 | ret = bxt_get_dram_info(dev_priv); |
1342 | else if (INTEL_GEN(dev_priv) == 9) | 1342 | else if (IS_GEN9(dev_priv)) |
1343 | ret = skl_get_dram_info(dev_priv); | 1343 | ret = skl_get_dram_info(dev_priv); |
1344 | else | 1344 | else |
1345 | ret = skl_dram_get_channels_info(dev_priv); | 1345 | ret = skl_dram_get_channels_info(dev_priv); |
@@ -1374,6 +1374,15 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) | |||
1374 | 1374 | ||
1375 | intel_device_info_runtime_init(mkwrite_device_info(dev_priv)); | 1375 | intel_device_info_runtime_init(mkwrite_device_info(dev_priv)); |
1376 | 1376 | ||
1377 | if (HAS_PPGTT(dev_priv)) { | ||
1378 | if (intel_vgpu_active(dev_priv) && | ||
1379 | !intel_vgpu_has_full_48bit_ppgtt(dev_priv)) { | ||
1380 | i915_report_error(dev_priv, | ||
1381 | "incompatible vGPU found, support for isolated ppGTT required\n"); | ||
1382 | return -ENXIO; | ||
1383 | } | ||
1384 | } | ||
1385 | |||
1377 | intel_sanitize_options(dev_priv); | 1386 | intel_sanitize_options(dev_priv); |
1378 | 1387 | ||
1379 | i915_perf_init(dev_priv); | 1388 | i915_perf_init(dev_priv); |
@@ -1629,14 +1638,16 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1629 | (struct intel_device_info *)ent->driver_data; | 1638 | (struct intel_device_info *)ent->driver_data; |
1630 | struct intel_device_info *device_info; | 1639 | struct intel_device_info *device_info; |
1631 | struct drm_i915_private *i915; | 1640 | struct drm_i915_private *i915; |
1641 | int err; | ||
1632 | 1642 | ||
1633 | i915 = kzalloc(sizeof(*i915), GFP_KERNEL); | 1643 | i915 = kzalloc(sizeof(*i915), GFP_KERNEL); |
1634 | if (!i915) | 1644 | if (!i915) |
1635 | return NULL; | 1645 | return ERR_PTR(-ENOMEM); |
1636 | 1646 | ||
1637 | if (drm_dev_init(&i915->drm, &driver, &pdev->dev)) { | 1647 | err = drm_dev_init(&i915->drm, &driver, &pdev->dev); |
1648 | if (err) { | ||
1638 | kfree(i915); | 1649 | kfree(i915); |
1639 | return NULL; | 1650 | return ERR_PTR(err); |
1640 | } | 1651 | } |
1641 | 1652 | ||
1642 | i915->drm.pdev = pdev; | 1653 | i915->drm.pdev = pdev; |
@@ -1649,8 +1660,8 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1649 | device_info->device_id = pdev->device; | 1660 | device_info->device_id = pdev->device; |
1650 | 1661 | ||
1651 | BUILD_BUG_ON(INTEL_MAX_PLATFORMS > | 1662 | BUILD_BUG_ON(INTEL_MAX_PLATFORMS > |
1652 | sizeof(device_info->platform_mask) * BITS_PER_BYTE); | 1663 | BITS_PER_TYPE(device_info->platform_mask)); |
1653 | BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); | 1664 | BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask)); |
1654 | 1665 | ||
1655 | return i915; | 1666 | return i915; |
1656 | } | 1667 | } |
@@ -1685,8 +1696,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1685 | int ret; | 1696 | int ret; |
1686 | 1697 | ||
1687 | dev_priv = i915_driver_create(pdev, ent); | 1698 | dev_priv = i915_driver_create(pdev, ent); |
1688 | if (!dev_priv) | 1699 | if (IS_ERR(dev_priv)) |
1689 | return -ENOMEM; | 1700 | return PTR_ERR(dev_priv); |
1690 | 1701 | ||
1691 | /* Disable nuclear pageflip by default on pre-ILK */ | 1702 | /* Disable nuclear pageflip by default on pre-ILK */ |
1692 | if (!i915_modparams.nuclear_pageflip && match_info->gen < 5) | 1703 | if (!i915_modparams.nuclear_pageflip && match_info->gen < 5) |
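Together these two hunks convert i915_driver_create() from a NULL-on-failure contract to the kernel's ERR_PTR convention, so the caller can propagate the real errno instead of a blanket -ENOMEM. The idiom in condensed form (the foo_* names are hypothetical):

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
	int err;

	if (!f)
		return ERR_PTR(-ENOMEM);

	err = foo_init(f);		/* hypothetical init step */
	if (err) {
		kfree(f);
		return ERR_PTR(err);	/* encode the errno in the pointer */
	}

	return f;
}

static int foo_load(void)
{
	struct foo *f = foo_create();

	if (IS_ERR(f))
		return PTR_ERR(f);	/* recover the encoded errno */

	/* ... use f ... */
	return 0;
}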
@@ -1710,26 +1721,12 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1710 | if (ret < 0) | 1721 | if (ret < 0) |
1711 | goto out_cleanup_mmio; | 1722 | goto out_cleanup_mmio; |
1712 | 1723 | ||
1713 | /* | ||
1714 | * TODO: move the vblank init and parts of modeset init steps into one | ||
1715 | * of the i915_driver_init_/i915_driver_register functions according | ||
1716 | * to the role/effect of the given init step. | ||
1717 | */ | ||
1718 | if (INTEL_INFO(dev_priv)->num_pipes) { | ||
1719 | ret = drm_vblank_init(&dev_priv->drm, | ||
1720 | INTEL_INFO(dev_priv)->num_pipes); | ||
1721 | if (ret) | ||
1722 | goto out_cleanup_hw; | ||
1723 | } | ||
1724 | |||
1725 | ret = i915_load_modeset_init(&dev_priv->drm); | 1724 | ret = i915_load_modeset_init(&dev_priv->drm); |
1726 | if (ret < 0) | 1725 | if (ret < 0) |
1727 | goto out_cleanup_hw; | 1726 | goto out_cleanup_hw; |
1728 | 1727 | ||
1729 | i915_driver_register(dev_priv); | 1728 | i915_driver_register(dev_priv); |
1730 | 1729 | ||
1731 | intel_init_ipc(dev_priv); | ||
1732 | |||
1733 | enable_rpm_wakeref_asserts(dev_priv); | 1730 | enable_rpm_wakeref_asserts(dev_priv); |
1734 | 1731 | ||
1735 | i915_welcome_messages(dev_priv); | 1732 | i915_welcome_messages(dev_priv); |
@@ -1781,7 +1778,6 @@ void i915_driver_unload(struct drm_device *dev) | |||
1781 | i915_reset_error_state(dev_priv); | 1778 | i915_reset_error_state(dev_priv); |
1782 | 1779 | ||
1783 | i915_gem_fini(dev_priv); | 1780 | i915_gem_fini(dev_priv); |
1784 | intel_fbc_cleanup_cfb(dev_priv); | ||
1785 | 1781 | ||
1786 | intel_power_domains_fini_hw(dev_priv); | 1782 | intel_power_domains_fini_hw(dev_priv); |
1787 | 1783 | ||
@@ -1919,9 +1915,7 @@ static int i915_drm_suspend(struct drm_device *dev) | |||
1919 | i915_save_state(dev_priv); | 1915 | i915_save_state(dev_priv); |
1920 | 1916 | ||
1921 | opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; | 1917 | opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; |
1922 | intel_opregion_notify_adapter(dev_priv, opregion_target_state); | 1918 | intel_opregion_suspend(dev_priv, opregion_target_state); |
1923 | |||
1924 | intel_opregion_unregister(dev_priv); | ||
1925 | 1919 | ||
1926 | intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); | 1920 | intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); |
1927 | 1921 | ||
@@ -1962,7 +1956,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) | |||
1962 | get_suspend_mode(dev_priv, hibernation)); | 1956 | get_suspend_mode(dev_priv, hibernation)); |
1963 | 1957 | ||
1964 | ret = 0; | 1958 | ret = 0; |
1965 | if (IS_GEN9_LP(dev_priv)) | 1959 | if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) |
1966 | bxt_enable_dc9(dev_priv); | 1960 | bxt_enable_dc9(dev_priv); |
1967 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | 1961 | else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) |
1968 | hsw_enable_pc8(dev_priv); | 1962 | hsw_enable_pc8(dev_priv); |
@@ -2040,7 +2034,6 @@ static int i915_drm_resume(struct drm_device *dev) | |||
2040 | 2034 | ||
2041 | i915_restore_state(dev_priv); | 2035 | i915_restore_state(dev_priv); |
2042 | intel_pps_unlock_regs_wa(dev_priv); | 2036 | intel_pps_unlock_regs_wa(dev_priv); |
2043 | intel_opregion_setup(dev_priv); | ||
2044 | 2037 | ||
2045 | intel_init_pch_refclk(dev_priv); | 2038 | intel_init_pch_refclk(dev_priv); |
2046 | 2039 | ||
@@ -2082,12 +2075,10 @@ static int i915_drm_resume(struct drm_device *dev) | |||
2082 | */ | 2075 | */ |
2083 | intel_hpd_init(dev_priv); | 2076 | intel_hpd_init(dev_priv); |
2084 | 2077 | ||
2085 | intel_opregion_register(dev_priv); | 2078 | intel_opregion_resume(dev_priv); |
2086 | 2079 | ||
2087 | intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); | 2080 | intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); |
2088 | 2081 | ||
2089 | intel_opregion_notify_adapter(dev_priv, PCI_D0); | ||
2090 | |||
2091 | intel_power_domains_enable(dev_priv); | 2082 | intel_power_domains_enable(dev_priv); |
2092 | 2083 | ||
2093 | enable_rpm_wakeref_asserts(dev_priv); | 2084 | enable_rpm_wakeref_asserts(dev_priv); |
@@ -2155,7 +2146,7 @@ static int i915_drm_resume_early(struct drm_device *dev) | |||
2155 | 2146 | ||
2156 | intel_uncore_resume_early(dev_priv); | 2147 | intel_uncore_resume_early(dev_priv); |
2157 | 2148 | ||
2158 | if (IS_GEN9_LP(dev_priv)) { | 2149 | if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) { |
2159 | gen9_sanitize_dc_state(dev_priv); | 2150 | gen9_sanitize_dc_state(dev_priv); |
2160 | bxt_disable_dc9(dev_priv); | 2151 | bxt_disable_dc9(dev_priv); |
2161 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { | 2152 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
@@ -2922,7 +2913,10 @@ static int intel_runtime_suspend(struct device *kdev) | |||
2922 | intel_uncore_suspend(dev_priv); | 2913 | intel_uncore_suspend(dev_priv); |
2923 | 2914 | ||
2924 | ret = 0; | 2915 | ret = 0; |
2925 | if (IS_GEN9_LP(dev_priv)) { | 2916 | if (INTEL_GEN(dev_priv) >= 11) { |
2917 | icl_display_core_uninit(dev_priv); | ||
2918 | bxt_enable_dc9(dev_priv); | ||
2919 | } else if (IS_GEN9_LP(dev_priv)) { | ||
2926 | bxt_display_core_uninit(dev_priv); | 2920 | bxt_display_core_uninit(dev_priv); |
2927 | bxt_enable_dc9(dev_priv); | 2921 | bxt_enable_dc9(dev_priv); |
2928 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { | 2922 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
@@ -3007,7 +3001,18 @@ static int intel_runtime_resume(struct device *kdev) | |||
3007 | if (intel_uncore_unclaimed_mmio(dev_priv)) | 3001 | if (intel_uncore_unclaimed_mmio(dev_priv)) |
3008 | DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); | 3002 | DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); |
3009 | 3003 | ||
3010 | if (IS_GEN9_LP(dev_priv)) { | 3004 | if (INTEL_GEN(dev_priv) >= 11) { |
3005 | bxt_disable_dc9(dev_priv); | ||
3006 | icl_display_core_init(dev_priv, true); | ||
3007 | if (dev_priv->csr.dmc_payload) { | ||
3008 | if (dev_priv->csr.allowed_dc_mask & | ||
3009 | DC_STATE_EN_UPTO_DC6) | ||
3010 | skl_enable_dc6(dev_priv); | ||
3011 | else if (dev_priv->csr.allowed_dc_mask & | ||
3012 | DC_STATE_EN_UPTO_DC5) | ||
3013 | gen9_enable_dc5(dev_priv); | ||
3014 | } | ||
3015 | } else if (IS_GEN9_LP(dev_priv)) { | ||
3011 | bxt_disable_dc9(dev_priv); | 3016 | bxt_disable_dc9(dev_priv); |
3012 | bxt_display_core_init(dev_priv, true); | 3017 | bxt_display_core_init(dev_priv, true); |
3013 | if (dev_priv->csr.dmc_payload && | 3018 | if (dev_priv->csr.dmc_payload && |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8624b4bdc242..4064e49dbf70 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -54,6 +54,7 @@ | |||
54 | #include <drm/drm_cache.h> | 54 | #include <drm/drm_cache.h> |
55 | #include <drm/drm_util.h> | 55 | #include <drm/drm_util.h> |
56 | 56 | ||
57 | #include "i915_fixed.h" | ||
57 | #include "i915_params.h" | 58 | #include "i915_params.h" |
58 | #include "i915_reg.h" | 59 | #include "i915_reg.h" |
59 | #include "i915_utils.h" | 60 | #include "i915_utils.h" |
@@ -87,8 +88,8 @@ | |||
87 | 88 | ||
88 | #define DRIVER_NAME "i915" | 89 | #define DRIVER_NAME "i915" |
89 | #define DRIVER_DESC "Intel Graphics" | 90 | #define DRIVER_DESC "Intel Graphics" |
90 | #define DRIVER_DATE "20180921" | 91 | #define DRIVER_DATE "20181122" |
91 | #define DRIVER_TIMESTAMP 1537521997 | 92 | #define DRIVER_TIMESTAMP 1542898187 |
92 | 93 | ||
93 | /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and | 94 | /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and |
94 | * WARN_ON()) for hw state sanity checks to check for unexpected conditions | 95 | * WARN_ON()) for hw state sanity checks to check for unexpected conditions |
@@ -127,144 +128,6 @@ bool i915_error_injected(void); | |||
127 | __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \ | 128 | __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \ |
128 | fmt, ##__VA_ARGS__) | 129 | fmt, ##__VA_ARGS__) |
129 | 130 | ||
130 | typedef struct { | ||
131 | uint32_t val; | ||
132 | } uint_fixed_16_16_t; | ||
133 | |||
134 | #define FP_16_16_MAX ({ \ | ||
135 | uint_fixed_16_16_t fp; \ | ||
136 | fp.val = UINT_MAX; \ | ||
137 | fp; \ | ||
138 | }) | ||
139 | |||
140 | static inline bool is_fixed16_zero(uint_fixed_16_16_t val) | ||
141 | { | ||
142 | if (val.val == 0) | ||
143 | return true; | ||
144 | return false; | ||
145 | } | ||
146 | |||
147 | static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val) | ||
148 | { | ||
149 | uint_fixed_16_16_t fp; | ||
150 | |||
151 | WARN_ON(val > U16_MAX); | ||
152 | |||
153 | fp.val = val << 16; | ||
154 | return fp; | ||
155 | } | ||
156 | |||
157 | static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp) | ||
158 | { | ||
159 | return DIV_ROUND_UP(fp.val, 1 << 16); | ||
160 | } | ||
161 | |||
162 | static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp) | ||
163 | { | ||
164 | return fp.val >> 16; | ||
165 | } | ||
166 | |||
167 | static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1, | ||
168 | uint_fixed_16_16_t min2) | ||
169 | { | ||
170 | uint_fixed_16_16_t min; | ||
171 | |||
172 | min.val = min(min1.val, min2.val); | ||
173 | return min; | ||
174 | } | ||
175 | |||
176 | static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1, | ||
177 | uint_fixed_16_16_t max2) | ||
178 | { | ||
179 | uint_fixed_16_16_t max; | ||
180 | |||
181 | max.val = max(max1.val, max2.val); | ||
182 | return max; | ||
183 | } | ||
184 | |||
185 | static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val) | ||
186 | { | ||
187 | uint_fixed_16_16_t fp; | ||
188 | WARN_ON(val > U32_MAX); | ||
189 | fp.val = (uint32_t) val; | ||
190 | return fp; | ||
191 | } | ||
192 | |||
193 | static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val, | ||
194 | uint_fixed_16_16_t d) | ||
195 | { | ||
196 | return DIV_ROUND_UP(val.val, d.val); | ||
197 | } | ||
198 | |||
199 | static inline uint32_t mul_round_up_u32_fixed16(uint32_t val, | ||
200 | uint_fixed_16_16_t mul) | ||
201 | { | ||
202 | uint64_t intermediate_val; | ||
203 | |||
204 | intermediate_val = (uint64_t) val * mul.val; | ||
205 | intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16); | ||
206 | WARN_ON(intermediate_val > U32_MAX); | ||
207 | return (uint32_t) intermediate_val; | ||
208 | } | ||
209 | |||
210 | static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val, | ||
211 | uint_fixed_16_16_t mul) | ||
212 | { | ||
213 | uint64_t intermediate_val; | ||
214 | |||
215 | intermediate_val = (uint64_t) val.val * mul.val; | ||
216 | intermediate_val = intermediate_val >> 16; | ||
217 | return clamp_u64_to_fixed16(intermediate_val); | ||
218 | } | ||
219 | |||
220 | static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d) | ||
221 | { | ||
222 | uint64_t interm_val; | ||
223 | |||
224 | interm_val = (uint64_t)val << 16; | ||
225 | interm_val = DIV_ROUND_UP_ULL(interm_val, d); | ||
226 | return clamp_u64_to_fixed16(interm_val); | ||
227 | } | ||
228 | |||
229 | static inline uint32_t div_round_up_u32_fixed16(uint32_t val, | ||
230 | uint_fixed_16_16_t d) | ||
231 | { | ||
232 | uint64_t interm_val; | ||
233 | |||
234 | interm_val = (uint64_t)val << 16; | ||
235 | interm_val = DIV_ROUND_UP_ULL(interm_val, d.val); | ||
236 | WARN_ON(interm_val > U32_MAX); | ||
237 | return (uint32_t) interm_val; | ||
238 | } | ||
239 | |||
240 | static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val, | ||
241 | uint_fixed_16_16_t mul) | ||
242 | { | ||
243 | uint64_t intermediate_val; | ||
244 | |||
245 | intermediate_val = (uint64_t) val * mul.val; | ||
246 | return clamp_u64_to_fixed16(intermediate_val); | ||
247 | } | ||
248 | |||
249 | static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1, | ||
250 | uint_fixed_16_16_t add2) | ||
251 | { | ||
252 | uint64_t interm_sum; | ||
253 | |||
254 | interm_sum = (uint64_t) add1.val + add2.val; | ||
255 | return clamp_u64_to_fixed16(interm_sum); | ||
256 | } | ||
257 | |||
258 | static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1, | ||
259 | uint32_t add2) | ||
260 | { | ||
261 | uint64_t interm_sum; | ||
262 | uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2); | ||
263 | |||
264 | interm_sum = (uint64_t) add1.val + interm_add2.val; | ||
265 | return clamp_u64_to_fixed16(interm_sum); | ||
266 | } | ||
267 | |||
268 | enum hpd_pin { | 131 | enum hpd_pin { |
269 | HPD_NONE = 0, | 132 | HPD_NONE = 0, |
270 | HPD_TV = HPD_NONE, /* TV is known to be unreliable */ | 133 | HPD_TV = HPD_NONE, /* TV is known to be unreliable */ |
@@ -283,7 +146,8 @@ enum hpd_pin { | |||
283 | #define for_each_hpd_pin(__pin) \ | 146 | #define for_each_hpd_pin(__pin) \ |
284 | for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++) | 147 | for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++) |
285 | 148 | ||
286 | #define HPD_STORM_DEFAULT_THRESHOLD 5 | 149 | /* Threshold == 5 for long IRQs, 50 for short */ |
150 | #define HPD_STORM_DEFAULT_THRESHOLD 50 | ||
287 | 151 | ||
288 | struct i915_hotplug { | 152 | struct i915_hotplug { |
289 | struct work_struct hotplug_work; | 153 | struct work_struct hotplug_work; |
@@ -308,6 +172,8 @@ struct i915_hotplug { | |||
308 | bool poll_enabled; | 172 | bool poll_enabled; |
309 | 173 | ||
310 | unsigned int hpd_storm_threshold; | 174 | unsigned int hpd_storm_threshold; |
175 | /* Whether or not to count short HPD IRQs in HPD storms */ | ||
176 | u8 hpd_short_storm_enabled; | ||
311 | 177 | ||
312 | /* | 178 | /* |
313 | * if we get a HPD irq from DP and a HPD irq from non-DP | 179 | * if we get a HPD irq from DP and a HPD irq from non-DP |
@@ -465,8 +331,10 @@ struct drm_i915_display_funcs { | |||
465 | struct intel_csr { | 331 | struct intel_csr { |
466 | struct work_struct work; | 332 | struct work_struct work; |
467 | const char *fw_path; | 333 | const char *fw_path; |
334 | uint32_t required_version; | ||
335 | uint32_t max_fw_size; /* bytes */ | ||
468 | uint32_t *dmc_payload; | 336 | uint32_t *dmc_payload; |
469 | uint32_t dmc_fw_size; | 337 | uint32_t dmc_fw_size; /* dwords */ |
470 | uint32_t version; | 338 | uint32_t version; |
471 | uint32_t mmio_count; | 339 | uint32_t mmio_count; |
472 | i915_reg_t mmioaddr[8]; | 340 | i915_reg_t mmioaddr[8]; |
@@ -546,6 +414,8 @@ struct intel_fbc { | |||
546 | int adjusted_y; | 414 | int adjusted_y; |
547 | 415 | ||
548 | int y; | 416 | int y; |
417 | |||
418 | uint16_t pixel_blend_mode; | ||
549 | } plane; | 419 | } plane; |
550 | 420 | ||
551 | struct { | 421 | struct { |
@@ -630,7 +500,6 @@ struct i915_psr { | |||
630 | bool sink_psr2_support; | 500 | bool sink_psr2_support; |
631 | bool link_standby; | 501 | bool link_standby; |
632 | bool colorimetry_support; | 502 | bool colorimetry_support; |
633 | bool alpm; | ||
634 | bool psr2_enabled; | 503 | bool psr2_enabled; |
635 | u8 sink_sync_latency; | 504 | u8 sink_sync_latency; |
636 | ktime_t last_entry_attempt; | 505 | ktime_t last_entry_attempt; |
@@ -918,6 +787,11 @@ struct i915_power_well_desc { | |||
918 | /* The pw is backing the VGA functionality */ | 787 | /* The pw is backing the VGA functionality */ |
919 | bool has_vga:1; | 788 | bool has_vga:1; |
920 | bool has_fuses:1; | 789 | bool has_fuses:1; |
790 | /* | ||
791 | * The pw is for an ICL+ TypeC PHY port in | ||
792 | * Thunderbolt mode. | ||
793 | */ | ||
794 | bool is_tc_tbt:1; | ||
921 | } hsw; | 795 | } hsw; |
922 | }; | 796 | }; |
923 | const struct i915_power_well_ops *ops; | 797 | const struct i915_power_well_ops *ops; |
@@ -1042,17 +916,6 @@ struct i915_gem_mm { | |||
1042 | 916 | ||
1043 | #define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */ | 917 | #define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */ |
1044 | 918 | ||
1045 | #define DP_AUX_A 0x40 | ||
1046 | #define DP_AUX_B 0x10 | ||
1047 | #define DP_AUX_C 0x20 | ||
1048 | #define DP_AUX_D 0x30 | ||
1049 | #define DP_AUX_E 0x50 | ||
1050 | #define DP_AUX_F 0x60 | ||
1051 | |||
1052 | #define DDC_PIN_B 0x05 | ||
1053 | #define DDC_PIN_C 0x04 | ||
1054 | #define DDC_PIN_D 0x06 | ||
1055 | |||
1056 | struct ddi_vbt_port_info { | 919 | struct ddi_vbt_port_info { |
1057 | int max_tmds_clock; | 920 | int max_tmds_clock; |
1058 | 921 | ||
@@ -1099,6 +962,7 @@ struct intel_vbt_data { | |||
1099 | unsigned int panel_type:4; | 962 | unsigned int panel_type:4; |
1100 | int lvds_ssc_freq; | 963 | int lvds_ssc_freq; |
1101 | unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ | 964 | unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ |
965 | enum drm_panel_orientation orientation; | ||
1102 | 966 | ||
1103 | enum drrs_support_type drrs_type; | 967 | enum drrs_support_type drrs_type; |
1104 | 968 | ||
@@ -1144,6 +1008,7 @@ struct intel_vbt_data { | |||
1144 | u8 *data; | 1008 | u8 *data; |
1145 | const u8 *sequence[MIPI_SEQ_MAX]; | 1009 | const u8 *sequence[MIPI_SEQ_MAX]; |
1146 | u8 *deassert_seq; /* Used by fixup_mipi_sequences() */ | 1010 | u8 *deassert_seq; /* Used by fixup_mipi_sequences() */ |
1011 | enum drm_panel_orientation orientation; | ||
1147 | } dsi; | 1012 | } dsi; |
1148 | 1013 | ||
1149 | int crt_ddc_pin; | 1014 | int crt_ddc_pin; |
@@ -1240,9 +1105,9 @@ struct skl_ddb_values { | |||
1240 | }; | 1105 | }; |
1241 | 1106 | ||
1242 | struct skl_wm_level { | 1107 | struct skl_wm_level { |
1243 | bool plane_en; | ||
1244 | uint16_t plane_res_b; | 1108 | uint16_t plane_res_b; |
1245 | uint8_t plane_res_l; | 1109 | uint8_t plane_res_l; |
1110 | bool plane_en; | ||
1246 | }; | 1111 | }; |
1247 | 1112 | ||
1248 | /* Stores plane specific WM parameters */ | 1113 | /* Stores plane specific WM parameters */ |
@@ -1520,30 +1385,12 @@ struct i915_oa_ops { | |||
1520 | bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr); | 1385 | bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr); |
1521 | 1386 | ||
1522 | /** | 1387 | /** |
1523 | * @init_oa_buffer: Resets the head and tail pointers of the | ||
1524 | * circular buffer for periodic OA reports. | ||
1525 | * | ||
1526 | * Called when first opening a stream for OA metrics, but also may be | ||
1527 | * called in response to an OA buffer overflow or other error | ||
1528 | * condition. | ||
1529 | * | ||
1530 | * Note it may be necessary to clear the full OA buffer here as part of | ||
1531 | * maintaining the invariable that new reports must be written to | ||
1532 | * zeroed memory for us to be able to reliable detect if an expected | ||
1533 | * report has not yet landed in memory. (At least on Haswell the OA | ||
1534 | * buffer tail pointer is not synchronized with reports being visible | ||
1535 | * to the CPU) | ||
1536 | */ | ||
1537 | void (*init_oa_buffer)(struct drm_i915_private *dev_priv); | ||
1538 | |||
1539 | /** | ||
1540 | * @enable_metric_set: Selects and applies any MUX configuration to set | 1388 | * @enable_metric_set: Selects and applies any MUX configuration to set |
1541 | * up the Boolean and Custom (B/C) counters that are part of the | 1389 | * up the Boolean and Custom (B/C) counters that are part of the |
1542 | * counter reports being sampled. May apply system constraints such as | 1390 | * counter reports being sampled. May apply system constraints such as |
1543 | * disabling EU clock gating as required. | 1391 | * disabling EU clock gating as required. |
1544 | */ | 1392 | */ |
1545 | int (*enable_metric_set)(struct drm_i915_private *dev_priv, | 1393 | int (*enable_metric_set)(struct i915_perf_stream *stream); |
1546 | const struct i915_oa_config *oa_config); | ||
1547 | 1394 | ||
1548 | /** | 1395 | /** |
1549 | * @disable_metric_set: Remove system constraints associated with using | 1396 | * @disable_metric_set: Remove system constraints associated with using |
@@ -1554,12 +1401,12 @@ struct i915_oa_ops { | |||
1554 | /** | 1401 | /** |
1555 | * @oa_enable: Enable periodic sampling | 1402 | * @oa_enable: Enable periodic sampling |
1556 | */ | 1403 | */ |
1557 | void (*oa_enable)(struct drm_i915_private *dev_priv); | 1404 | void (*oa_enable)(struct i915_perf_stream *stream); |
1558 | 1405 | ||
1559 | /** | 1406 | /** |
1560 | * @oa_disable: Disable periodic sampling | 1407 | * @oa_disable: Disable periodic sampling |
1561 | */ | 1408 | */ |
1562 | void (*oa_disable)(struct drm_i915_private *dev_priv); | 1409 | void (*oa_disable)(struct i915_perf_stream *stream); |
1563 | 1410 | ||
1564 | /** | 1411 | /** |
1565 | * @read: Copy data from the circular OA buffer into a given userspace | 1412 | * @read: Copy data from the circular OA buffer into a given userspace |
@@ -1948,7 +1795,6 @@ struct drm_i915_private { | |||
1948 | 1795 | ||
1949 | struct dram_info { | 1796 | struct dram_info { |
1950 | bool valid; | 1797 | bool valid; |
1951 | bool valid_dimm; | ||
1952 | bool is_16gb_dimm; | 1798 | bool is_16gb_dimm; |
1953 | u8 num_channels; | 1799 | u8 num_channels; |
1954 | enum dram_rank { | 1800 | enum dram_rank { |
@@ -2323,6 +2169,8 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg) | |||
2323 | (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \ | 2169 | (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \ |
2324 | (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0) | 2170 | (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0) |
2325 | 2171 | ||
2172 | bool i915_sg_trim(struct sg_table *orig_st); | ||
2173 | |||
2326 | static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg) | 2174 | static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg) |
2327 | { | 2175 | { |
2328 | unsigned int page_sizes; | 2176 | unsigned int page_sizes; |
@@ -2368,20 +2216,12 @@ intel_info(const struct drm_i915_private *dev_priv) | |||
2368 | #define REVID_FOREVER 0xff | 2216 | #define REVID_FOREVER 0xff |
2369 | #define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision) | 2217 | #define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision) |
2370 | 2218 | ||
2371 | #define GEN_FOREVER (0) | ||
2372 | |||
2373 | #define INTEL_GEN_MASK(s, e) ( \ | 2219 | #define INTEL_GEN_MASK(s, e) ( \ |
2374 | BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \ | 2220 | BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \ |
2375 | BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \ | 2221 | BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \ |
2376 | GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \ | 2222 | GENMASK((e) - 1, (s) - 1)) |
2377 | (s) != GEN_FOREVER ? (s) - 1 : 0) \ | ||
2378 | ) | ||
2379 | 2223 | ||
2380 | /* | 2224 | /* Returns true if Gen is in inclusive range [Start, End] */ |
2381 | * Returns true if Gen is in inclusive range [Start, End]. | ||
2382 | * | ||
2383 | * Use GEN_FOREVER for unbound start and or end. | ||
2384 | */ | ||
2385 | #define IS_GEN(dev_priv, s, e) \ | 2225 | #define IS_GEN(dev_priv, s, e) \ |
2386 | (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e)))) | 2226 | (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e)))) |
2387 | 2227 | ||
@@ -2462,6 +2302,8 @@ intel_info(const struct drm_i915_private *dev_priv) | |||
2462 | #define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \ | 2302 | #define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \ |
2463 | INTEL_DEVID(dev_priv) == 0x5915 || \ | 2303 | INTEL_DEVID(dev_priv) == 0x5915 || \ |
2464 | INTEL_DEVID(dev_priv) == 0x591E) | 2304 | INTEL_DEVID(dev_priv) == 0x591E) |
2305 | #define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \ | ||
2306 | INTEL_DEVID(dev_priv) == 0x87C0) | ||
2465 | #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ | 2307 | #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ |
2466 | (dev_priv)->info.gt == 2) | 2308 | (dev_priv)->info.gt == 2) |
2467 | #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ | 2309 | #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ |
@@ -2593,9 +2435,14 @@ intel_info(const struct drm_i915_private *dev_priv) | |||
2593 | 2435 | ||
2594 | #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv) | 2436 | #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv) |
2595 | 2437 | ||
2596 | #define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt) | 2438 | #define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt) |
2597 | #define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2) | 2439 | #define HAS_PPGTT(dev_priv) \ |
2598 | #define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3) | 2440 | (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE) |
2441 | #define HAS_FULL_PPGTT(dev_priv) \ | ||
2442 | (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL) | ||
2443 | #define HAS_FULL_48BIT_PPGTT(dev_priv) \ | ||
2444 | (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL_4LVL) | ||
2445 | |||
2599 | #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \ | 2446 | #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \ |
2600 | GEM_BUG_ON((sizes) == 0); \ | 2447 | GEM_BUG_ON((sizes) == 0); \ |
2601 | ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \ | 2448 | ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \ |
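The modparam-derived USES_* macros give way to predicates over a per-device ppGTT capability. They assume a monotonically ordered enum in the device info — roughly the following, though the exact definition lives in intel_device_info.h and the names here should be treated as an assumption:

enum intel_ppgtt {
	INTEL_PPGTT_NONE = 0,	/* no per-process GTT */
	INTEL_PPGTT_ALIASING,	/* single ppGTT aliasing the GGTT */
	INTEL_PPGTT_FULL,	/* true per-context address spaces */
	INTEL_PPGTT_FULL_4LVL,	/* full ppGTT with 4-level (48-bit) paging */
};

The ordering is what makes the >= comparisons in HAS_FULL_PPGTT() and HAS_FULL_48BIT_PPGTT() well defined, and it also explains the min_t() clamp in the I915_PARAM_HAS_ALIASING_PPGTT getparam hunk earlier in this diff.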
@@ -2743,9 +2590,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv) | |||
2743 | return IS_BROXTON(dev_priv) && intel_vtd_active(); | 2590 | return IS_BROXTON(dev_priv) && intel_vtd_active(); |
2744 | } | 2591 | } |
2745 | 2592 | ||
2746 | int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, | ||
2747 | int enable_ppgtt); | ||
2748 | |||
2749 | /* i915_drv.c */ | 2593 | /* i915_drv.c */ |
2750 | void __printf(3, 4) | 2594 | void __printf(3, 4) |
2751 | __i915_printk(struct drm_i915_private *dev_priv, const char *level, | 2595 | __i915_printk(struct drm_i915_private *dev_priv, const char *level, |
@@ -3230,7 +3074,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj, | |||
3230 | int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, | 3074 | int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, |
3231 | unsigned int flags, | 3075 | unsigned int flags, |
3232 | const struct i915_sched_attr *attr); | 3076 | const struct i915_sched_attr *attr); |
3233 | #define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX | 3077 | #define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX) |
3234 | 3078 | ||
3235 | int __must_check | 3079 | int __must_check |
3236 | i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write); | 3080 | i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write); |
@@ -3462,6 +3306,7 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, | |||
3462 | enum port port); | 3306 | enum port port); |
3463 | bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv, | 3307 | bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv, |
3464 | enum port port); | 3308 | enum port port); |
3309 | enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port); | ||
3465 | 3310 | ||
3466 | /* intel_acpi.c */ | 3311 | /* intel_acpi.c */ |
3467 | #ifdef CONFIG_ACPI | 3312 | #ifdef CONFIG_ACPI |
@@ -3483,8 +3328,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv) | |||
3483 | extern void intel_modeset_init_hw(struct drm_device *dev); | 3328 | extern void intel_modeset_init_hw(struct drm_device *dev); |
3484 | extern int intel_modeset_init(struct drm_device *dev); | 3329 | extern int intel_modeset_init(struct drm_device *dev); |
3485 | extern void intel_modeset_cleanup(struct drm_device *dev); | 3330 | extern void intel_modeset_cleanup(struct drm_device *dev); |
3486 | extern int intel_connector_register(struct drm_connector *); | ||
3487 | extern void intel_connector_unregister(struct drm_connector *); | ||
3488 | extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, | 3331 | extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, |
3489 | bool state); | 3332 | bool state); |
3490 | extern void intel_display_resume(struct drm_device *dev); | 3333 | extern void intel_display_resume(struct drm_device *dev); |
@@ -3584,6 +3427,12 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder, | |||
3584 | void vlv_phy_reset_lanes(struct intel_encoder *encoder, | 3427 | void vlv_phy_reset_lanes(struct intel_encoder *encoder, |
3585 | const struct intel_crtc_state *old_crtc_state); | 3428 | const struct intel_crtc_state *old_crtc_state); |
3586 | 3429 | ||
3430 | /* intel_combo_phy.c */ | ||
3431 | void icl_combo_phys_init(struct drm_i915_private *dev_priv); | ||
3432 | void icl_combo_phys_uninit(struct drm_i915_private *dev_priv); | ||
3433 | void cnl_combo_phys_init(struct drm_i915_private *dev_priv); | ||
3434 | void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv); | ||
3435 | |||
3587 | int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); | 3436 | int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); |
3588 | int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); | 3437 | int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); |
3589 | u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, | 3438 | u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, |
diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/i915_fixed.h new file mode 100644 index 000000000000..591dd89ba7af --- /dev/null +++ b/drivers/gpu/drm/i915/i915_fixed.h | |||
@@ -0,0 +1,143 @@ | |||
1 | /* SPDX-License-Identifier: MIT */ | ||
2 | /* | ||
3 | * Copyright © 2018 Intel Corporation | ||
4 | */ | ||
5 | |||
6 | #ifndef _I915_FIXED_H_ | ||
7 | #define _I915_FIXED_H_ | ||
8 | |||
9 | typedef struct { | ||
10 | u32 val; | ||
11 | } uint_fixed_16_16_t; | ||
12 | |||
13 | #define FP_16_16_MAX ((uint_fixed_16_16_t){ .val = UINT_MAX }) | ||
14 | |||
15 | static inline bool is_fixed16_zero(uint_fixed_16_16_t val) | ||
16 | { | ||
17 | return val.val == 0; | ||
18 | } | ||
19 | |||
20 | static inline uint_fixed_16_16_t u32_to_fixed16(u32 val) | ||
21 | { | ||
22 | uint_fixed_16_16_t fp = { .val = val << 16 }; | ||
23 | |||
24 | WARN_ON(val > U16_MAX); | ||
25 | |||
26 | return fp; | ||
27 | } | ||
28 | |||
29 | static inline u32 fixed16_to_u32_round_up(uint_fixed_16_16_t fp) | ||
30 | { | ||
31 | return DIV_ROUND_UP(fp.val, 1 << 16); | ||
32 | } | ||
33 | |||
34 | static inline u32 fixed16_to_u32(uint_fixed_16_16_t fp) | ||
35 | { | ||
36 | return fp.val >> 16; | ||
37 | } | ||
38 | |||
39 | static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1, | ||
40 | uint_fixed_16_16_t min2) | ||
41 | { | ||
42 | uint_fixed_16_16_t min = { .val = min(min1.val, min2.val) }; | ||
43 | |||
44 | return min; | ||
45 | } | ||
46 | |||
47 | static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1, | ||
48 | uint_fixed_16_16_t max2) | ||
49 | { | ||
50 | uint_fixed_16_16_t max = { .val = max(max1.val, max2.val) }; | ||
51 | |||
52 | return max; | ||
53 | } | ||
54 | |||
55 | static inline uint_fixed_16_16_t clamp_u64_to_fixed16(u64 val) | ||
56 | { | ||
57 | uint_fixed_16_16_t fp = { .val = (u32)val }; | ||
58 | |||
59 | WARN_ON(val > U32_MAX); | ||
60 | |||
61 | return fp; | ||
62 | } | ||
63 | |||
64 | static inline u32 div_round_up_fixed16(uint_fixed_16_16_t val, | ||
65 | uint_fixed_16_16_t d) | ||
66 | { | ||
67 | return DIV_ROUND_UP(val.val, d.val); | ||
68 | } | ||
69 | |||
70 | static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul) | ||
71 | { | ||
72 | u64 tmp; | ||
73 | |||
74 | tmp = (u64)val * mul.val; | ||
75 | tmp = DIV_ROUND_UP_ULL(tmp, 1 << 16); | ||
76 | WARN_ON(tmp > U32_MAX); | ||
77 | |||
78 | return (u32)tmp; | ||
79 | } | ||
80 | |||
81 | static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val, | ||
82 | uint_fixed_16_16_t mul) | ||
83 | { | ||
84 | u64 tmp; | ||
85 | |||
86 | tmp = (u64)val.val * mul.val; | ||
87 | tmp = tmp >> 16; | ||
88 | |||
89 | return clamp_u64_to_fixed16(tmp); | ||
90 | } | ||
91 | |||
92 | static inline uint_fixed_16_16_t div_fixed16(u32 val, u32 d) | ||
93 | { | ||
94 | u64 tmp; | ||
95 | |||
96 | tmp = (u64)val << 16; | ||
97 | tmp = DIV_ROUND_UP_ULL(tmp, d); | ||
98 | |||
99 | return clamp_u64_to_fixed16(tmp); | ||
100 | } | ||
101 | |||
102 | static inline u32 div_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t d) | ||
103 | { | ||
104 | u64 tmp; | ||
105 | |||
106 | tmp = (u64)val << 16; | ||
107 | tmp = DIV_ROUND_UP_ULL(tmp, d.val); | ||
108 | WARN_ON(tmp > U32_MAX); | ||
109 | |||
110 | return (u32)tmp; | ||
111 | } | ||
112 | |||
113 | static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul) | ||
114 | { | ||
115 | u64 tmp; | ||
116 | |||
117 | tmp = (u64)val * mul.val; | ||
118 | |||
119 | return clamp_u64_to_fixed16(tmp); | ||
120 | } | ||
121 | |||
122 | static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1, | ||
123 | uint_fixed_16_16_t add2) | ||
124 | { | ||
125 | u64 tmp; | ||
126 | |||
127 | tmp = (u64)add1.val + add2.val; | ||
128 | |||
129 | return clamp_u64_to_fixed16(tmp); | ||
130 | } | ||
131 | |||
132 | static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1, | ||
133 | u32 add2) | ||
134 | { | ||
135 | uint_fixed_16_16_t tmp_add2 = u32_to_fixed16(add2); | ||
136 | u64 tmp; | ||
137 | |||
138 | tmp = (u64)add1.val + tmp_add2.val; | ||
139 | |||
140 | return clamp_u64_to_fixed16(tmp); | ||
141 | } | ||
142 | |||
143 | #endif /* _I915_FIXED_H_ */ | ||
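The new header is a straight relocation of the 16.16 fixed-point helpers out of i915_drv.h, modernised to u32/u64 types. A value is stored scaled by 2^16, so 1.5 is 0x00018000. A small usage sketch — scale_lines() is a hypothetical watermark-style computation, not an existing function:

static u32 scale_lines(u32 pixels, u32 num, u32 den)
{
	/* ratio = num / den, rounded up into 16.16 fixed point */
	uint_fixed_16_16_t ratio = div_fixed16(num, den);

	/* pixels * ratio, rounded back up to an integer */
	return mul_round_up_u32_fixed16(pixels, ratio);
}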
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0c8aa57ce83b..c55b1f75c980 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1740,6 +1740,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
1740 | */ | 1740 | */ |
1741 | err = i915_gem_object_wait(obj, | 1741 | err = i915_gem_object_wait(obj, |
1742 | I915_WAIT_INTERRUPTIBLE | | 1742 | I915_WAIT_INTERRUPTIBLE | |
1743 | I915_WAIT_PRIORITY | | ||
1743 | (write_domain ? I915_WAIT_ALL : 0), | 1744 | (write_domain ? I915_WAIT_ALL : 0), |
1744 | MAX_SCHEDULE_TIMEOUT, | 1745 | MAX_SCHEDULE_TIMEOUT, |
1745 | to_rps_client(file)); | 1746 | to_rps_client(file)); |
@@ -2381,11 +2382,23 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj) | |||
2381 | invalidate_mapping_pages(mapping, 0, (loff_t)-1); | 2382 | invalidate_mapping_pages(mapping, 0, (loff_t)-1); |
2382 | } | 2383 | } |
2383 | 2384 | ||
2385 | /* | ||
2386 | * Move pages to appropriate lru and release the pagevec, decrementing the | ||
2387 | * ref count of those pages. | ||
2388 | */ | ||
2389 | static void check_release_pagevec(struct pagevec *pvec) | ||
2390 | { | ||
2391 | check_move_unevictable_pages(pvec); | ||
2392 | __pagevec_release(pvec); | ||
2393 | cond_resched(); | ||
2394 | } | ||
2395 | |||
2384 | static void | 2396 | static void |
2385 | i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, | 2397 | i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, |
2386 | struct sg_table *pages) | 2398 | struct sg_table *pages) |
2387 | { | 2399 | { |
2388 | struct sgt_iter sgt_iter; | 2400 | struct sgt_iter sgt_iter; |
2401 | struct pagevec pvec; | ||
2389 | struct page *page; | 2402 | struct page *page; |
2390 | 2403 | ||
2391 | __i915_gem_object_release_shmem(obj, pages, true); | 2404 | __i915_gem_object_release_shmem(obj, pages, true); |
@@ -2395,6 +2408,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, | |||
2395 | if (i915_gem_object_needs_bit17_swizzle(obj)) | 2408 | if (i915_gem_object_needs_bit17_swizzle(obj)) |
2396 | i915_gem_object_save_bit_17_swizzle(obj, pages); | 2409 | i915_gem_object_save_bit_17_swizzle(obj, pages); |
2397 | 2410 | ||
2411 | mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping); | ||
2412 | |||
2413 | pagevec_init(&pvec); | ||
2398 | for_each_sgt_page(page, sgt_iter, pages) { | 2414 | for_each_sgt_page(page, sgt_iter, pages) { |
2399 | if (obj->mm.dirty) | 2415 | if (obj->mm.dirty) |
2400 | set_page_dirty(page); | 2416 | set_page_dirty(page); |
@@ -2402,8 +2418,11 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj, | |||
2402 | if (obj->mm.madv == I915_MADV_WILLNEED) | 2418 | if (obj->mm.madv == I915_MADV_WILLNEED) |
2403 | mark_page_accessed(page); | 2419 | mark_page_accessed(page); |
2404 | 2420 | ||
2405 | put_page(page); | 2421 | if (!pagevec_add(&pvec, page)) |
2422 | check_release_pagevec(&pvec); | ||
2406 | } | 2423 | } |
2424 | if (pagevec_count(&pvec)) | ||
2425 | check_release_pagevec(&pvec); | ||
2407 | obj->mm.dirty = false; | 2426 | obj->mm.dirty = false; |
2408 | 2427 | ||
2409 | sg_free_table(pages); | 2428 | sg_free_table(pages); |
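Pages backing GEM objects are now marked unevictable while pinned, so the release path must move them back onto the proper LRU; batching through a pagevec amortises that work instead of paying per-page. The generic shape of the pattern, as a sketch:

static void release_pages_batched(struct page **pages, unsigned long count)
{
	struct pagevec pvec;
	unsigned long i;

	pagevec_init(&pvec);
	for (i = 0; i < count; i++) {
		/* pagevec_add() returns the slots left; 0 means the vec is full */
		if (!pagevec_add(&pvec, pages[i]))
			check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		check_release_pagevec(&pvec);
}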
@@ -2483,7 +2502,7 @@ unlock: | |||
2483 | mutex_unlock(&obj->mm.lock); | 2502 | mutex_unlock(&obj->mm.lock); |
2484 | } | 2503 | } |
2485 | 2504 | ||
2486 | static bool i915_sg_trim(struct sg_table *orig_st) | 2505 | bool i915_sg_trim(struct sg_table *orig_st) |
2487 | { | 2506 | { |
2488 | struct sg_table new_st; | 2507 | struct sg_table new_st; |
2489 | struct scatterlist *sg, *new_sg; | 2508 | struct scatterlist *sg, *new_sg; |
@@ -2524,6 +2543,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) | |||
2524 | unsigned long last_pfn = 0; /* suppress gcc warning */ | 2543 | unsigned long last_pfn = 0; /* suppress gcc warning */ |
2525 | unsigned int max_segment = i915_sg_segment_size(); | 2544 | unsigned int max_segment = i915_sg_segment_size(); |
2526 | unsigned int sg_page_sizes; | 2545 | unsigned int sg_page_sizes; |
2546 | struct pagevec pvec; | ||
2527 | gfp_t noreclaim; | 2547 | gfp_t noreclaim; |
2528 | int ret; | 2548 | int ret; |
2529 | 2549 | ||
@@ -2559,6 +2579,7 @@ rebuild_st: | |||
2559 | * Fail silently without starting the shrinker | 2579 | * Fail silently without starting the shrinker |
2560 | */ | 2580 | */ |
2561 | mapping = obj->base.filp->f_mapping; | 2581 | mapping = obj->base.filp->f_mapping; |
2582 | mapping_set_unevictable(mapping); | ||
2562 | noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM); | 2583 | noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM); |
2563 | noreclaim |= __GFP_NORETRY | __GFP_NOWARN; | 2584 | noreclaim |= __GFP_NORETRY | __GFP_NOWARN; |
2564 | 2585 | ||
@@ -2573,6 +2594,7 @@ rebuild_st: | |||
2573 | gfp_t gfp = noreclaim; | 2594 | gfp_t gfp = noreclaim; |
2574 | 2595 | ||
2575 | do { | 2596 | do { |
2597 | cond_resched(); | ||
2576 | page = shmem_read_mapping_page_gfp(mapping, i, gfp); | 2598 | page = shmem_read_mapping_page_gfp(mapping, i, gfp); |
2577 | if (likely(!IS_ERR(page))) | 2599 | if (likely(!IS_ERR(page))) |
2578 | break; | 2600 | break; |
@@ -2583,7 +2605,6 @@ rebuild_st: | |||
2583 | } | 2605 | } |
2584 | 2606 | ||
2585 | i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++); | 2607 | i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++); |
2586 | cond_resched(); | ||
2587 | 2608 | ||
2588 | /* | 2609 | /* |
2589 | * We've tried hard to allocate the memory by reaping | 2610 | * We've tried hard to allocate the memory by reaping |
@@ -2673,8 +2694,14 @@ rebuild_st: | |||
2673 | err_sg: | 2694 | err_sg: |
2674 | sg_mark_end(sg); | 2695 | sg_mark_end(sg); |
2675 | err_pages: | 2696 | err_pages: |
2676 | for_each_sgt_page(page, sgt_iter, st) | 2697 | mapping_clear_unevictable(mapping); |
2677 | put_page(page); | 2698 | pagevec_init(&pvec); |
2699 | for_each_sgt_page(page, sgt_iter, st) { | ||
2700 | if (!pagevec_add(&pvec, page)) | ||
2701 | check_release_pagevec(&pvec); | ||
2702 | } | ||
2703 | if (pagevec_count(&pvec)) | ||
2704 | check_release_pagevec(&pvec); | ||
2678 | sg_free_table(st); | 2705 | sg_free_table(st); |
2679 | kfree(st); | 2706 | kfree(st); |
2680 | 2707 | ||
@@ -3530,6 +3557,8 @@ static void __sleep_rcu(struct rcu_head *rcu) | |||
3530 | struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu); | 3557 | struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu); |
3531 | struct drm_i915_private *i915 = s->i915; | 3558 | struct drm_i915_private *i915 = s->i915; |
3532 | 3559 | ||
3560 | destroy_rcu_head(&s->rcu); | ||
3561 | |||
3533 | if (same_epoch(i915, s->epoch)) { | 3562 | if (same_epoch(i915, s->epoch)) { |
3534 | INIT_WORK(&s->work, __sleep_work); | 3563 | INIT_WORK(&s->work, __sleep_work); |
3535 | queue_work(i915->wq, &s->work); | 3564 | queue_work(i915->wq, &s->work); |
@@ -3646,6 +3675,7 @@ out_rearm: | |||
3646 | if (same_epoch(dev_priv, epoch)) { | 3675 | if (same_epoch(dev_priv, epoch)) { |
3647 | struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL); | 3676 | struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL); |
3648 | if (s) { | 3677 | if (s) { |
3678 | init_rcu_head(&s->rcu); | ||
3649 | s->i915 = dev_priv; | 3679 | s->i915 = dev_priv; |
3650 | s->epoch = epoch; | 3680 | s->epoch = epoch; |
3651 | call_rcu(&s->rcu, __sleep_rcu); | 3681 | call_rcu(&s->rcu, __sleep_rcu); |
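The init_rcu_head()/destroy_rcu_head() pairs added in these hunks keep CONFIG_DEBUG_OBJECTS_RCU_HEAD happy: call_rcu() on an untracked (or still-pending) head triggers a debug splat, and __i915_gem_free_object_rcu() goes on to reuse obj->rcu as a plain list head. The lifecycle in miniature (struct sleeper is a hypothetical type):

struct sleeper {
	struct rcu_head rcu;
};

static void sleeper_cb(struct rcu_head *rcu)
{
	struct sleeper *s = container_of(rcu, typeof(*s), rcu);

	destroy_rcu_head(&s->rcu);	/* head will not be used as rcu again */
	kfree(s);
}

static void sleeper_arm(struct sleeper *s)
{
	init_rcu_head(&s->rcu);		/* register with debugobjects */
	call_rcu(&s->rcu, sleeper_cb);
}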
@@ -3743,7 +3773,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
3743 | start = ktime_get(); | 3773 | start = ktime_get(); |
3744 | 3774 | ||
3745 | ret = i915_gem_object_wait(obj, | 3775 | ret = i915_gem_object_wait(obj, |
3746 | I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, | 3776 | I915_WAIT_INTERRUPTIBLE | |
3777 | I915_WAIT_PRIORITY | | ||
3778 | I915_WAIT_ALL, | ||
3747 | to_wait_timeout(args->timeout_ns), | 3779 | to_wait_timeout(args->timeout_ns), |
3748 | to_rps_client(file)); | 3780 | to_rps_client(file)); |
3749 | 3781 | ||
@@ -4710,6 +4742,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, | |||
4710 | INIT_LIST_HEAD(&obj->lut_list); | 4742 | INIT_LIST_HEAD(&obj->lut_list); |
4711 | INIT_LIST_HEAD(&obj->batch_pool_link); | 4743 | INIT_LIST_HEAD(&obj->batch_pool_link); |
4712 | 4744 | ||
4745 | init_rcu_head(&obj->rcu); | ||
4746 | |||
4713 | obj->ops = ops; | 4747 | obj->ops = ops; |
4714 | 4748 | ||
4715 | reservation_object_init(&obj->__builtin_resv); | 4749 | reservation_object_init(&obj->__builtin_resv); |
@@ -4977,6 +5011,13 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head) | |||
4977 | struct drm_i915_private *i915 = to_i915(obj->base.dev); | 5011 | struct drm_i915_private *i915 = to_i915(obj->base.dev); |
4978 | 5012 | ||
4979 | /* | 5013 | /* |
5014 | * We reuse obj->rcu for the freed list, so we had better not treat | ||
5015 | * it like a rcu_head from this point forwards. And we expect all | ||
5016 | * objects to be freed via this path. | ||
5017 | */ | ||
5018 | destroy_rcu_head(&obj->rcu); | ||
5019 | |||
5020 | /* | ||
4980 | * Since we require blocking on struct_mutex to unbind the freed | 5021 | * Since we require blocking on struct_mutex to unbind the freed |
4981 | * object from the GPU before releasing resources back to the | 5022 | * object from the GPU before releasing resources back to the |
4982 | * system, we can not do that directly from the RCU callback (which may | 5023 | * system, we can not do that directly from the RCU callback (which may |
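The init_rcu_head()/destroy_rcu_head() pairs added in the hunks above exist for CONFIG_DEBUG_OBJECTS_RCU_HEAD: a head must be initialized before call_rcu() queues it and destroyed before its storage is reused (here obj->rcu doubles as a freed-list link). A rough userspace model of the state machine those annotations let the debug code check:

```c
#include <assert.h>
#include <stdio.h>

/* States tracked per rcu_head by CONFIG_DEBUG_OBJECTS_RCU_HEAD (modelled). */
enum head_state { HEAD_UNTRACKED, HEAD_INIT, HEAD_QUEUED };

struct rcu_head_model {
	enum head_state state;
};

static void init_rcu_head(struct rcu_head_model *h)
{
	assert(h->state == HEAD_UNTRACKED);
	h->state = HEAD_INIT;
}

static void call_rcu_model(struct rcu_head_model *h)
{
	assert(h->state == HEAD_INIT);   /* queueing an uninitialized head is a bug */
	h->state = HEAD_QUEUED;
}

static void rcu_callback(struct rcu_head_model *h)
{
	h->state = HEAD_INIT;            /* callback ran; head is idle again */
}

static void destroy_rcu_head(struct rcu_head_model *h)
{
	assert(h->state == HEAD_INIT);   /* must not still be queued */
	h->state = HEAD_UNTRACKED;       /* storage may now be reused, e.g. as a list link */
}

int main(void)
{
	struct rcu_head_model h = { HEAD_UNTRACKED };

	init_rcu_head(&h);
	call_rcu_model(&h);
	rcu_callback(&h);     /* inside the callback the head is ours again */
	destroy_rcu_head(&h); /* now safe to reuse the storage */
	printf("lifecycle ok\n");
	return 0;
}
```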
@@ -5293,18 +5334,6 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) | |||
5293 | I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ? | 5334 | I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ? |
5294 | LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); | 5335 | LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); |
5295 | 5336 | ||
5296 | if (HAS_PCH_NOP(dev_priv)) { | ||
5297 | if (IS_IVYBRIDGE(dev_priv)) { | ||
5298 | u32 temp = I915_READ(GEN7_MSG_CTL); | ||
5299 | temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); | ||
5300 | I915_WRITE(GEN7_MSG_CTL, temp); | ||
5301 | } else if (INTEL_GEN(dev_priv) >= 7) { | ||
5302 | u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT); | ||
5303 | temp &= ~RESET_PCH_HANDSHAKE_ENABLE; | ||
5304 | I915_WRITE(HSW_NDE_RSTWRN_OPT, temp); | ||
5305 | } | ||
5306 | } | ||
5307 | |||
5308 | intel_gt_workarounds_apply(dev_priv); | 5337 | intel_gt_workarounds_apply(dev_priv); |
5309 | 5338 | ||
5310 | i915_gem_init_swizzling(dev_priv); | 5339 | i915_gem_init_swizzling(dev_priv); |
@@ -5951,7 +5980,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, | |||
5951 | * the bits. | 5980 | * the bits. |
5952 | */ | 5981 | */ |
5953 | BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > | 5982 | BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > |
5954 | sizeof(atomic_t) * BITS_PER_BYTE); | 5983 | BITS_PER_TYPE(atomic_t)); |
5955 | 5984 | ||
5956 | if (old) { | 5985 | if (old) { |
5957 | WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits)); | 5986 | WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits)); |
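BITS_PER_TYPE() replaces the open-coded sizeof(atomic_t) * BITS_PER_BYTE above; in the kernel the macro is just that product. A standalone sketch of the check (the pipe counts below are illustrative assumptions, not the driver's actual constants):

```c
#include <stdio.h>

#define BITS_PER_BYTE		8
#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)

/* Stand-in for the kernel's atomic_t (an int wrapped in a struct). */
typedef struct { int counter; } atomic_t;

int main(void)
{
	/* The frontbuffer-bit check: all pipes' bits must fit in one atomic_t. */
	unsigned int bits_per_pipe = 8;   /* assumed value for illustration */
	unsigned int max_pipes = 4;       /* assumed value for illustration */

	printf("need %u bits, have %zu\n",
	       bits_per_pipe * max_pipes, BITS_PER_TYPE(atomic_t));
	return 0;
}
```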
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index 599c4f6eb1ea..b0e4b976880c 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h | |||
@@ -47,17 +47,19 @@ struct drm_i915_private; | |||
47 | #define GEM_DEBUG_DECL(var) var | 47 | #define GEM_DEBUG_DECL(var) var |
48 | #define GEM_DEBUG_EXEC(expr) expr | 48 | #define GEM_DEBUG_EXEC(expr) expr |
49 | #define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr) | 49 | #define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr) |
50 | #define GEM_DEBUG_WARN_ON(expr) GEM_WARN_ON(expr) | ||
50 | 51 | ||
51 | #else | 52 | #else |
52 | 53 | ||
53 | #define GEM_SHOW_DEBUG() (0) | 54 | #define GEM_SHOW_DEBUG() (0) |
54 | 55 | ||
55 | #define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) | 56 | #define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) |
56 | #define GEM_WARN_ON(expr) (BUILD_BUG_ON_INVALID(expr), 0) | 57 | #define GEM_WARN_ON(expr) ({ unlikely(!!(expr)); }) |
57 | 58 | ||
58 | #define GEM_DEBUG_DECL(var) | 59 | #define GEM_DEBUG_DECL(var) |
59 | #define GEM_DEBUG_EXEC(expr) do { } while (0) | 60 | #define GEM_DEBUG_EXEC(expr) do { } while (0) |
60 | #define GEM_DEBUG_BUG_ON(expr) | 61 | #define GEM_DEBUG_BUG_ON(expr) |
62 | #define GEM_DEBUG_WARN_ON(expr) ({ BUILD_BUG_ON_INVALID(expr); 0; }) | ||
61 | #endif | 63 | #endif |
62 | 64 | ||
63 | #if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM) | 65 | #if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM) |
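With the hunk above, the release-build GEM_WARN_ON() evaluates its argument and yields the truth value, so it can gate an error path, while the new GEM_DEBUG_WARN_ON() compiles away to a constant. A simplified sketch of those semantics (the real release macro additionally wraps the test in unlikely()):

```c
#include <stdio.h>

/* Release-build variants from the hunk above, simplified:
 * GEM_WARN_ON() keeps evaluating its condition so callers can branch
 * on it; GEM_DEBUG_WARN_ON() is dead code outside debug builds.
 */
#define GEM_WARN_ON(expr)	(!!(expr))
#define GEM_DEBUG_WARN_ON(expr)	(0)

static int setup(int bad)
{
	if (GEM_WARN_ON(bad))		/* still usable as an error check */
		return -1;

	if (GEM_DEBUG_WARN_ON(bad))	/* compiled out in release builds */
		return -2;

	return 0;
}

int main(void)
{
	printf("%d %d\n", setup(0), setup(1));	/* prints: 0 -1 */
	return 0;
}
```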
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index f772593b99ab..b97963db0287 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
@@ -337,7 +337,7 @@ __create_hw_context(struct drm_i915_private *dev_priv, | |||
337 | kref_init(&ctx->ref); | 337 | kref_init(&ctx->ref); |
338 | list_add_tail(&ctx->link, &dev_priv->contexts.list); | 338 | list_add_tail(&ctx->link, &dev_priv->contexts.list); |
339 | ctx->i915 = dev_priv; | 339 | ctx->i915 = dev_priv; |
340 | ctx->sched.priority = I915_PRIORITY_NORMAL; | 340 | ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL); |
341 | 341 | ||
342 | for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { | 342 | for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { |
343 | struct intel_context *ce = &ctx->__engine[n]; | 343 | struct intel_context *ce = &ctx->__engine[n]; |
@@ -414,7 +414,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, | |||
414 | if (IS_ERR(ctx)) | 414 | if (IS_ERR(ctx)) |
415 | return ctx; | 415 | return ctx; |
416 | 416 | ||
417 | if (USES_FULL_PPGTT(dev_priv)) { | 417 | if (HAS_FULL_PPGTT(dev_priv)) { |
418 | struct i915_hw_ppgtt *ppgtt; | 418 | struct i915_hw_ppgtt *ppgtt; |
419 | 419 | ||
420 | ppgtt = i915_ppgtt_create(dev_priv, file_priv); | 420 | ppgtt = i915_ppgtt_create(dev_priv, file_priv); |
@@ -457,7 +457,7 @@ i915_gem_context_create_gvt(struct drm_device *dev) | |||
457 | if (ret) | 457 | if (ret) |
458 | return ERR_PTR(ret); | 458 | return ERR_PTR(ret); |
459 | 459 | ||
460 | ctx = __create_hw_context(to_i915(dev), NULL); | 460 | ctx = i915_gem_create_context(to_i915(dev), NULL); |
461 | if (IS_ERR(ctx)) | 461 | if (IS_ERR(ctx)) |
462 | goto out; | 462 | goto out; |
463 | 463 | ||
@@ -504,7 +504,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio) | |||
504 | } | 504 | } |
505 | 505 | ||
506 | i915_gem_context_clear_bannable(ctx); | 506 | i915_gem_context_clear_bannable(ctx); |
507 | ctx->sched.priority = prio; | 507 | ctx->sched.priority = I915_USER_PRIORITY(prio); |
508 | ctx->ring_size = PAGE_SIZE; | 508 | ctx->ring_size = PAGE_SIZE; |
509 | 509 | ||
510 | GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); | 510 | GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); |
@@ -879,7 +879,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, | |||
879 | args->value = i915_gem_context_is_bannable(ctx); | 879 | args->value = i915_gem_context_is_bannable(ctx); |
880 | break; | 880 | break; |
881 | case I915_CONTEXT_PARAM_PRIORITY: | 881 | case I915_CONTEXT_PARAM_PRIORITY: |
882 | args->value = ctx->sched.priority; | 882 | args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT; |
883 | break; | 883 | break; |
884 | default: | 884 | default: |
885 | ret = -EINVAL; | 885 | ret = -EINVAL; |
@@ -948,7 +948,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, | |||
948 | !capable(CAP_SYS_NICE)) | 948 | !capable(CAP_SYS_NICE)) |
949 | ret = -EPERM; | 949 | ret = -EPERM; |
950 | else | 950 | else |
951 | ctx->sched.priority = priority; | 951 | ctx->sched.priority = |
952 | I915_USER_PRIORITY(priority); | ||
952 | } | 953 | } |
953 | break; | 954 | break; |
954 | 955 | ||
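Context priorities are now stored pre-shifted by I915_USER_PRIORITY(), leaving the low bits free for internal scheduler bumps, and the getparam path shifts back down. The shift width is not visible in this hunk, so the value in the sketch below is an assumption for illustration:

```c
#include <stdio.h>

/* Assumed encoding: the user priority lives in the upper bits, leaving
 * I915_USER_PRIORITY_SHIFT low bits for internal priority bumps.
 * The shift value (2) is an assumption for illustration only.
 */
#define I915_USER_PRIORITY_SHIFT 2
#define I915_USER_PRIORITY(x)	((x) << I915_USER_PRIORITY_SHIFT)

int main(void)
{
	int user_prio = 512;				/* a user-visible priority */
	int stored = I915_USER_PRIORITY(user_prio);	/* what ctx->sched.priority holds */

	/* getparam reverses the encoding: */
	printf("stored=%d, reported=%d\n",
	       stored, stored >> I915_USER_PRIORITY_SHIFT);
	return 0;
}
```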
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index 08165f6a0a84..f6d870b1f73e 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h | |||
@@ -163,6 +163,7 @@ struct i915_gem_context { | |||
163 | /** engine: per-engine logical HW state */ | 163 | /** engine: per-engine logical HW state */ |
164 | struct intel_context { | 164 | struct intel_context { |
165 | struct i915_gem_context *gem_context; | 165 | struct i915_gem_context *gem_context; |
166 | struct intel_engine_cs *active; | ||
166 | struct i915_vma *state; | 167 | struct i915_vma *state; |
167 | struct intel_ring *ring; | 168 | struct intel_ring *ring; |
168 | u32 *lrc_reg_state; | 169 | u32 *lrc_reg_state; |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 09187286d346..d4fac09095f8 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -460,7 +460,7 @@ eb_validate_vma(struct i915_execbuffer *eb, | |||
460 | * any non-page-aligned or non-canonical addresses. | 460 | * any non-page-aligned or non-canonical addresses. |
461 | */ | 461 | */ |
462 | if (unlikely(entry->flags & EXEC_OBJECT_PINNED && | 462 | if (unlikely(entry->flags & EXEC_OBJECT_PINNED && |
463 | entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK))) | 463 | entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK))) |
464 | return -EINVAL; | 464 | return -EINVAL; |
465 | 465 | ||
466 | /* pad_to_size was once a reserved field, so sanitize it */ | 466 | /* pad_to_size was once a reserved field, so sanitize it */ |
@@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma, | |||
1268 | else if (gen >= 4) | 1268 | else if (gen >= 4) |
1269 | len = 4; | 1269 | len = 4; |
1270 | else | 1270 | else |
1271 | len = 3; | 1271 | len = 6; |
1272 | 1272 | ||
1273 | batch = reloc_gpu(eb, vma, len); | 1273 | batch = reloc_gpu(eb, vma, len); |
1274 | if (IS_ERR(batch)) | 1274 | if (IS_ERR(batch)) |
@@ -1309,6 +1309,11 @@ relocate_entry(struct i915_vma *vma, | |||
1309 | *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; | 1309 | *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; |
1310 | *batch++ = addr; | 1310 | *batch++ = addr; |
1311 | *batch++ = target_offset; | 1311 | *batch++ = target_offset; |
1312 | |||
1313 | /* And again for good measure (blb/pnv) */ | ||
1314 | *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; | ||
1315 | *batch++ = addr; | ||
1316 | *batch++ = target_offset; | ||
1312 | } | 1317 | } |
1313 | 1318 | ||
1314 | goto out; | 1319 | goto out; |
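On gen3 (blb/pnv) the relocation store is emitted twice, which is why the dword reservation above grows from 3 to 6. A sketch of the doubled emission (the opcode values here are illustrative placeholders, not the real MI instruction encodings):

```c
#include <stdio.h>

/* Model of the gen3 path above: the same store is emitted twice, so the
 * batch reservation grows from 3 to 6 dwords. Opcode values are
 * illustrative, not the hardware encodings.
 */
#define MI_STORE_DWORD_IMM	0x20000000u
#define MI_MEM_VIRTUAL		0x00400000u

int main(void)
{
	unsigned int batch[6];
	unsigned int *b = batch;
	unsigned int addr = 0x1000, target = 0xdeadbeef;

	for (int pass = 0; pass < 2; pass++) {	/* "and again for good measure" */
		*b++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*b++ = addr;
		*b++ = target;
	}
	printf("emitted %ld dwords\n", (long)(b - batch));
	return 0;
}
```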
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 56c7f8637311..add1fe7aeb93 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -133,55 +133,6 @@ static inline void i915_ggtt_invalidate(struct drm_i915_private *i915) | |||
133 | i915->ggtt.invalidate(i915); | 133 | i915->ggtt.invalidate(i915); |
134 | } | 134 | } |
135 | 135 | ||
136 | int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, | ||
137 | int enable_ppgtt) | ||
138 | { | ||
139 | bool has_full_ppgtt; | ||
140 | bool has_full_48bit_ppgtt; | ||
141 | |||
142 | if (!dev_priv->info.has_aliasing_ppgtt) | ||
143 | return 0; | ||
144 | |||
145 | has_full_ppgtt = dev_priv->info.has_full_ppgtt; | ||
146 | has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt; | ||
147 | |||
148 | if (intel_vgpu_active(dev_priv)) { | ||
149 | /* GVT-g has no support for 32bit ppgtt */ | ||
150 | has_full_ppgtt = false; | ||
151 | has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv); | ||
152 | } | ||
153 | |||
154 | /* | ||
155 | * We don't allow disabling PPGTT for gen9+ as it's a requirement for | ||
156 | * execlists, the sole mechanism available to submit work. | ||
157 | */ | ||
158 | if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9) | ||
159 | return 0; | ||
160 | |||
161 | if (enable_ppgtt == 1) | ||
162 | return 1; | ||
163 | |||
164 | if (enable_ppgtt == 2 && has_full_ppgtt) | ||
165 | return 2; | ||
166 | |||
167 | if (enable_ppgtt == 3 && has_full_48bit_ppgtt) | ||
168 | return 3; | ||
169 | |||
170 | /* Disable ppgtt on SNB if VT-d is on. */ | ||
171 | if (IS_GEN6(dev_priv) && intel_vtd_active()) { | ||
172 | DRM_INFO("Disabling PPGTT because VT-d is on\n"); | ||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | if (has_full_48bit_ppgtt) | ||
177 | return 3; | ||
178 | |||
179 | if (has_full_ppgtt) | ||
180 | return 2; | ||
181 | |||
182 | return 1; | ||
183 | } | ||
184 | |||
185 | static int ppgtt_bind_vma(struct i915_vma *vma, | 136 | static int ppgtt_bind_vma(struct i915_vma *vma, |
186 | enum i915_cache_level cache_level, | 137 | enum i915_cache_level cache_level, |
187 | u32 unused) | 138 | u32 unused) |
@@ -235,9 +186,9 @@ static void clear_pages(struct i915_vma *vma) | |||
235 | memset(&vma->page_sizes, 0, sizeof(vma->page_sizes)); | 186 | memset(&vma->page_sizes, 0, sizeof(vma->page_sizes)); |
236 | } | 187 | } |
237 | 188 | ||
238 | static gen8_pte_t gen8_pte_encode(dma_addr_t addr, | 189 | static u64 gen8_pte_encode(dma_addr_t addr, |
239 | enum i915_cache_level level, | 190 | enum i915_cache_level level, |
240 | u32 flags) | 191 | u32 flags) |
241 | { | 192 | { |
242 | gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW; | 193 | gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW; |
243 | 194 | ||
@@ -274,9 +225,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr, | |||
274 | #define gen8_pdpe_encode gen8_pde_encode | 225 | #define gen8_pdpe_encode gen8_pde_encode |
275 | #define gen8_pml4e_encode gen8_pde_encode | 226 | #define gen8_pml4e_encode gen8_pde_encode |
276 | 227 | ||
277 | static gen6_pte_t snb_pte_encode(dma_addr_t addr, | 228 | static u64 snb_pte_encode(dma_addr_t addr, |
278 | enum i915_cache_level level, | 229 | enum i915_cache_level level, |
279 | u32 unused) | 230 | u32 flags) |
280 | { | 231 | { |
281 | gen6_pte_t pte = GEN6_PTE_VALID; | 232 | gen6_pte_t pte = GEN6_PTE_VALID; |
282 | pte |= GEN6_PTE_ADDR_ENCODE(addr); | 233 | pte |= GEN6_PTE_ADDR_ENCODE(addr); |
@@ -296,9 +247,9 @@ static gen6_pte_t snb_pte_encode(dma_addr_t addr, | |||
296 | return pte; | 247 | return pte; |
297 | } | 248 | } |
298 | 249 | ||
299 | static gen6_pte_t ivb_pte_encode(dma_addr_t addr, | 250 | static u64 ivb_pte_encode(dma_addr_t addr, |
300 | enum i915_cache_level level, | 251 | enum i915_cache_level level, |
301 | u32 unused) | 252 | u32 flags) |
302 | { | 253 | { |
303 | gen6_pte_t pte = GEN6_PTE_VALID; | 254 | gen6_pte_t pte = GEN6_PTE_VALID; |
304 | pte |= GEN6_PTE_ADDR_ENCODE(addr); | 255 | pte |= GEN6_PTE_ADDR_ENCODE(addr); |
@@ -320,9 +271,9 @@ static gen6_pte_t ivb_pte_encode(dma_addr_t addr, | |||
320 | return pte; | 271 | return pte; |
321 | } | 272 | } |
322 | 273 | ||
323 | static gen6_pte_t byt_pte_encode(dma_addr_t addr, | 274 | static u64 byt_pte_encode(dma_addr_t addr, |
324 | enum i915_cache_level level, | 275 | enum i915_cache_level level, |
325 | u32 flags) | 276 | u32 flags) |
326 | { | 277 | { |
327 | gen6_pte_t pte = GEN6_PTE_VALID; | 278 | gen6_pte_t pte = GEN6_PTE_VALID; |
328 | pte |= GEN6_PTE_ADDR_ENCODE(addr); | 279 | pte |= GEN6_PTE_ADDR_ENCODE(addr); |
@@ -336,9 +287,9 @@ static gen6_pte_t byt_pte_encode(dma_addr_t addr, | |||
336 | return pte; | 287 | return pte; |
337 | } | 288 | } |
338 | 289 | ||
339 | static gen6_pte_t hsw_pte_encode(dma_addr_t addr, | 290 | static u64 hsw_pte_encode(dma_addr_t addr, |
340 | enum i915_cache_level level, | 291 | enum i915_cache_level level, |
341 | u32 unused) | 292 | u32 flags) |
342 | { | 293 | { |
343 | gen6_pte_t pte = GEN6_PTE_VALID; | 294 | gen6_pte_t pte = GEN6_PTE_VALID; |
344 | pte |= HSW_PTE_ADDR_ENCODE(addr); | 295 | pte |= HSW_PTE_ADDR_ENCODE(addr); |
@@ -349,9 +300,9 @@ static gen6_pte_t hsw_pte_encode(dma_addr_t addr, | |||
349 | return pte; | 300 | return pte; |
350 | } | 301 | } |
351 | 302 | ||
352 | static gen6_pte_t iris_pte_encode(dma_addr_t addr, | 303 | static u64 iris_pte_encode(dma_addr_t addr, |
353 | enum i915_cache_level level, | 304 | enum i915_cache_level level, |
354 | u32 unused) | 305 | u32 flags) |
355 | { | 306 | { |
356 | gen6_pte_t pte = GEN6_PTE_VALID; | 307 | gen6_pte_t pte = GEN6_PTE_VALID; |
357 | pte |= HSW_PTE_ADDR_ENCODE(addr); | 308 | pte |= HSW_PTE_ADDR_ENCODE(addr); |
@@ -629,10 +580,9 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) | |||
629 | * region, including any PTEs which happen to point to scratch. | 580 | * region, including any PTEs which happen to point to scratch. |
630 | * | 581 | * |
631 | * This is only relevant for the 48b PPGTT where we support | 582 | * This is only relevant for the 48b PPGTT where we support |
632 | * huge-gtt-pages, see also i915_vma_insert(). | 583 | * huge-gtt-pages, see also i915_vma_insert(). However, as we share the |
633 | * | 584 | * scratch (read-only) between all vm, we create one 64k scratch page |
634 | * TODO: we should really consider write-protecting the scratch-page and | 585 | * for all. |
635 | * sharing between ppgtt | ||
636 | */ | 586 | */ |
637 | size = I915_GTT_PAGE_SIZE_4K; | 587 | size = I915_GTT_PAGE_SIZE_4K; |
638 | if (i915_vm_is_48bit(vm) && | 588 | if (i915_vm_is_48bit(vm) && |
@@ -715,14 +665,13 @@ static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt) | |||
715 | static void gen8_initialize_pt(struct i915_address_space *vm, | 665 | static void gen8_initialize_pt(struct i915_address_space *vm, |
716 | struct i915_page_table *pt) | 666 | struct i915_page_table *pt) |
717 | { | 667 | { |
718 | fill_px(vm, pt, | 668 | fill_px(vm, pt, vm->scratch_pte); |
719 | gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0)); | ||
720 | } | 669 | } |
721 | 670 | ||
722 | static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt, | 671 | static void gen6_initialize_pt(struct i915_address_space *vm, |
723 | struct i915_page_table *pt) | 672 | struct i915_page_table *pt) |
724 | { | 673 | { |
725 | fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte); | 674 | fill32_px(vm, pt, vm->scratch_pte); |
726 | } | 675 | } |
727 | 676 | ||
728 | static struct i915_page_directory *alloc_pd(struct i915_address_space *vm) | 677 | static struct i915_page_directory *alloc_pd(struct i915_address_space *vm) |
@@ -856,15 +805,13 @@ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) | |||
856 | /* Removes entries from a single page table, releasing it if it's empty. | 805 | /* Removes entries from a single page table, releasing it if it's empty. |
857 | * Caller can use the return value to update higher-level entries. | 806 | * Caller can use the return value to update higher-level entries. |
858 | */ | 807 | */ |
859 | static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm, | 808 | static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm, |
860 | struct i915_page_table *pt, | 809 | struct i915_page_table *pt, |
861 | u64 start, u64 length) | 810 | u64 start, u64 length) |
862 | { | 811 | { |
863 | unsigned int num_entries = gen8_pte_count(start, length); | 812 | unsigned int num_entries = gen8_pte_count(start, length); |
864 | unsigned int pte = gen8_pte_index(start); | 813 | unsigned int pte = gen8_pte_index(start); |
865 | unsigned int pte_end = pte + num_entries; | 814 | unsigned int pte_end = pte + num_entries; |
866 | const gen8_pte_t scratch_pte = | ||
867 | gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); | ||
868 | gen8_pte_t *vaddr; | 815 | gen8_pte_t *vaddr; |
869 | 816 | ||
870 | GEM_BUG_ON(num_entries > pt->used_ptes); | 817 | GEM_BUG_ON(num_entries > pt->used_ptes); |
@@ -875,7 +822,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm, | |||
875 | 822 | ||
876 | vaddr = kmap_atomic_px(pt); | 823 | vaddr = kmap_atomic_px(pt); |
877 | while (pte < pte_end) | 824 | while (pte < pte_end) |
878 | vaddr[pte++] = scratch_pte; | 825 | vaddr[pte++] = vm->scratch_pte; |
879 | kunmap_atomic(vaddr); | 826 | kunmap_atomic(vaddr); |
880 | 827 | ||
881 | return false; | 828 | return false; |
@@ -1208,7 +1155,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma, | |||
1208 | if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) { | 1155 | if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) { |
1209 | u16 i; | 1156 | u16 i; |
1210 | 1157 | ||
1211 | encode = pte_encode | vma->vm->scratch_page.daddr; | 1158 | encode = vma->vm->scratch_pte; |
1212 | vaddr = kmap_atomic_px(pd->page_table[idx.pde]); | 1159 | vaddr = kmap_atomic_px(pd->page_table[idx.pde]); |
1213 | 1160 | ||
1214 | for (i = 1; i < index; i += 16) | 1161 | for (i = 1; i < index; i += 16) |
@@ -1261,10 +1208,35 @@ static int gen8_init_scratch(struct i915_address_space *vm) | |||
1261 | { | 1208 | { |
1262 | int ret; | 1209 | int ret; |
1263 | 1210 | ||
1211 | /* | ||
1212 | * If everybody agrees not to write into the scratch page, | ||
1213 | * we can reuse it for all vm, keeping contexts and processes separate. | ||
1214 | */ | ||
1215 | if (vm->has_read_only && | ||
1216 | vm->i915->kernel_context && | ||
1217 | vm->i915->kernel_context->ppgtt) { | ||
1218 | struct i915_address_space *clone = | ||
1219 | &vm->i915->kernel_context->ppgtt->vm; | ||
1220 | |||
1221 | GEM_BUG_ON(!clone->has_read_only); | ||
1222 | |||
1223 | vm->scratch_page.order = clone->scratch_page.order; | ||
1224 | vm->scratch_pte = clone->scratch_pte; | ||
1225 | vm->scratch_pt = clone->scratch_pt; | ||
1226 | vm->scratch_pd = clone->scratch_pd; | ||
1227 | vm->scratch_pdp = clone->scratch_pdp; | ||
1228 | return 0; | ||
1229 | } | ||
1230 | |||
1264 | ret = setup_scratch_page(vm, __GFP_HIGHMEM); | 1231 | ret = setup_scratch_page(vm, __GFP_HIGHMEM); |
1265 | if (ret) | 1232 | if (ret) |
1266 | return ret; | 1233 | return ret; |
1267 | 1234 | ||
1235 | vm->scratch_pte = | ||
1236 | gen8_pte_encode(vm->scratch_page.daddr, | ||
1237 | I915_CACHE_LLC, | ||
1238 | PTE_READ_ONLY); | ||
1239 | |||
1268 | vm->scratch_pt = alloc_pt(vm); | 1240 | vm->scratch_pt = alloc_pt(vm); |
1269 | if (IS_ERR(vm->scratch_pt)) { | 1241 | if (IS_ERR(vm->scratch_pt)) { |
1270 | ret = PTR_ERR(vm->scratch_pt); | 1242 | ret = PTR_ERR(vm->scratch_pt); |
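gen8_init_scratch() now clones the kernel context's scratch tables whenever every vm maps scratch read-only; clones never set scratch_page.daddr, so the early return added to gen8_free_scratch() keeps them from freeing shared pages. A userspace model of that owner/clone split:

```c
#include <stdio.h>
#include <stdlib.h>

/* Model of the scratch-sharing scheme: the first (kernel) address space
 * allocates the read-only scratch page; later ones copy the pointers
 * but leave their own daddr at 0, so only the owner frees anything.
 */
struct vm_model {
	unsigned long daddr;	/* non-zero only for the owner */
	int *scratch;		/* shared scratch page */
};

static void vm_init(struct vm_model *vm, const struct vm_model *clone)
{
	if (clone) {
		vm->scratch = clone->scratch;	/* share, don't allocate */
		vm->daddr = 0;
		return;
	}
	vm->scratch = calloc(1024, sizeof(int));
	vm->daddr = (unsigned long)vm->scratch;
}

static void vm_fini(struct vm_model *vm)
{
	if (!vm->daddr)		/* clones skip the free, as in gen8_free_scratch() */
		return;
	free(vm->scratch);
}

int main(void)
{
	struct vm_model kernel_vm, user_vm;

	vm_init(&kernel_vm, NULL);
	vm_init(&user_vm, &kernel_vm);
	vm_fini(&user_vm);	/* no-op: not the owner */
	vm_fini(&kernel_vm);	/* the actual free */
	printf("shared scratch freed exactly once\n");
	return 0;
}
```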
@@ -1336,6 +1308,9 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) | |||
1336 | 1308 | ||
1337 | static void gen8_free_scratch(struct i915_address_space *vm) | 1309 | static void gen8_free_scratch(struct i915_address_space *vm) |
1338 | { | 1310 | { |
1311 | if (!vm->scratch_page.daddr) | ||
1312 | return; | ||
1313 | |||
1339 | if (use_4lvl(vm)) | 1314 | if (use_4lvl(vm)) |
1340 | free_pdp(vm, vm->scratch_pdp); | 1315 | free_pdp(vm, vm->scratch_pdp); |
1341 | free_pd(vm, vm->scratch_pd); | 1316 | free_pd(vm, vm->scratch_pd); |
@@ -1573,8 +1548,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt, | |||
1573 | static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) | 1548 | static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) |
1574 | { | 1549 | { |
1575 | struct i915_address_space *vm = &ppgtt->vm; | 1550 | struct i915_address_space *vm = &ppgtt->vm; |
1576 | const gen8_pte_t scratch_pte = | 1551 | const gen8_pte_t scratch_pte = vm->scratch_pte; |
1577 | gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); | ||
1578 | u64 start = 0, length = ppgtt->vm.total; | 1552 | u64 start = 0, length = ppgtt->vm.total; |
1579 | 1553 | ||
1580 | if (use_4lvl(vm)) { | 1554 | if (use_4lvl(vm)) { |
@@ -1647,16 +1621,12 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) | |||
1647 | ppgtt->vm.i915 = i915; | 1621 | ppgtt->vm.i915 = i915; |
1648 | ppgtt->vm.dma = &i915->drm.pdev->dev; | 1622 | ppgtt->vm.dma = &i915->drm.pdev->dev; |
1649 | 1623 | ||
1650 | ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ? | 1624 | ppgtt->vm.total = HAS_FULL_48BIT_PPGTT(i915) ? |
1651 | 1ULL << 48 : | 1625 | 1ULL << 48 : |
1652 | 1ULL << 32; | 1626 | 1ULL << 32; |
1653 | 1627 | ||
1654 | /* | 1628 | /* From bdw, there is support for read-only pages in the PPGTT. */ |
1655 | * From bdw, there is support for read-only pages in the PPGTT. | 1629 | ppgtt->vm.has_read_only = true; |
1656 | * | ||
1657 | * XXX GVT is not honouring the lack of RW in the PTE bits. | ||
1658 | */ | ||
1659 | ppgtt->vm.has_read_only = !intel_vgpu_active(i915); | ||
1660 | 1630 | ||
1661 | i915_address_space_init(&ppgtt->vm, i915); | 1631 | i915_address_space_init(&ppgtt->vm, i915); |
1662 | 1632 | ||
@@ -1721,7 +1691,7 @@ err_free: | |||
1721 | static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m) | 1691 | static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m) |
1722 | { | 1692 | { |
1723 | struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); | 1693 | struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); |
1724 | const gen6_pte_t scratch_pte = ppgtt->scratch_pte; | 1694 | const gen6_pte_t scratch_pte = base->vm.scratch_pte; |
1725 | struct i915_page_table *pt; | 1695 | struct i915_page_table *pt; |
1726 | u32 pte, pde; | 1696 | u32 pte, pde; |
1727 | 1697 | ||
@@ -1757,7 +1727,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m) | |||
1757 | if (i == 4) | 1727 | if (i == 4) |
1758 | continue; | 1728 | continue; |
1759 | 1729 | ||
1760 | seq_printf(m, "\t\t(%03d, %04d) %08lx: ", | 1730 | seq_printf(m, "\t\t(%03d, %04d) %08llx: ", |
1761 | pde, pte, | 1731 | pde, pte, |
1762 | (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE); | 1732 | (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE); |
1763 | for (i = 0; i < 4; i++) { | 1733 | for (i = 0; i < 4; i++) { |
@@ -1782,19 +1752,6 @@ static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt, | |||
1782 | ppgtt->pd_addr + pde); | 1752 | ppgtt->pd_addr + pde); |
1783 | } | 1753 | } |
1784 | 1754 | ||
1785 | static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv) | ||
1786 | { | ||
1787 | struct intel_engine_cs *engine; | ||
1788 | enum intel_engine_id id; | ||
1789 | |||
1790 | for_each_engine(engine, dev_priv, id) { | ||
1791 | u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ? | ||
1792 | GEN8_GFX_PPGTT_48B : 0; | ||
1793 | I915_WRITE(RING_MODE_GEN7(engine), | ||
1794 | _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level)); | ||
1795 | } | ||
1796 | } | ||
1797 | |||
1798 | static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv) | 1755 | static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv) |
1799 | { | 1756 | { |
1800 | struct intel_engine_cs *engine; | 1757 | struct intel_engine_cs *engine; |
@@ -1834,7 +1791,8 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv) | |||
1834 | ecochk = I915_READ(GAM_ECOCHK); | 1791 | ecochk = I915_READ(GAM_ECOCHK); |
1835 | I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); | 1792 | I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); |
1836 | 1793 | ||
1837 | I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); | 1794 | if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */ |
1795 | I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); | ||
1838 | } | 1796 | } |
1839 | 1797 | ||
1840 | /* PPGTT support for Sandybridge/Gen6 and later */ | 1798
@@ -1846,7 +1804,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, | |||
1846 | unsigned int pde = first_entry / GEN6_PTES; | 1804 | unsigned int pde = first_entry / GEN6_PTES; |
1847 | unsigned int pte = first_entry % GEN6_PTES; | 1805 | unsigned int pte = first_entry % GEN6_PTES; |
1848 | unsigned int num_entries = length / I915_GTT_PAGE_SIZE; | 1806 | unsigned int num_entries = length / I915_GTT_PAGE_SIZE; |
1849 | const gen6_pte_t scratch_pte = ppgtt->scratch_pte; | 1807 | const gen6_pte_t scratch_pte = vm->scratch_pte; |
1850 | 1808 | ||
1851 | while (num_entries) { | 1809 | while (num_entries) { |
1852 | struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++]; | 1810 | struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++]; |
@@ -1937,7 +1895,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, | |||
1937 | if (IS_ERR(pt)) | 1895 | if (IS_ERR(pt)) |
1938 | goto unwind_out; | 1896 | goto unwind_out; |
1939 | 1897 | ||
1940 | gen6_initialize_pt(ppgtt, pt); | 1898 | gen6_initialize_pt(vm, pt); |
1941 | ppgtt->base.pd.page_table[pde] = pt; | 1899 | ppgtt->base.pd.page_table[pde] = pt; |
1942 | 1900 | ||
1943 | if (i915_vma_is_bound(ppgtt->vma, | 1901 | if (i915_vma_is_bound(ppgtt->vma, |
@@ -1975,9 +1933,9 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt) | |||
1975 | if (ret) | 1933 | if (ret) |
1976 | return ret; | 1934 | return ret; |
1977 | 1935 | ||
1978 | ppgtt->scratch_pte = | 1936 | vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr, |
1979 | vm->pte_encode(vm->scratch_page.daddr, | 1937 | I915_CACHE_NONE, |
1980 | I915_CACHE_NONE, PTE_READ_ONLY); | 1938 | PTE_READ_ONLY); |
1981 | 1939 | ||
1982 | vm->scratch_pt = alloc_pt(vm); | 1940 | vm->scratch_pt = alloc_pt(vm); |
1983 | if (IS_ERR(vm->scratch_pt)) { | 1941 | if (IS_ERR(vm->scratch_pt)) { |
@@ -1985,7 +1943,7 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt) | |||
1985 | return PTR_ERR(vm->scratch_pt); | 1943 | return PTR_ERR(vm->scratch_pt); |
1986 | } | 1944 | } |
1987 | 1945 | ||
1988 | gen6_initialize_pt(ppgtt, vm->scratch_pt); | 1946 | gen6_initialize_pt(vm, vm->scratch_pt); |
1989 | gen6_for_all_pdes(unused, &ppgtt->base.pd, pde) | 1947 | gen6_for_all_pdes(unused, &ppgtt->base.pd, pde) |
1990 | ppgtt->base.pd.page_table[pde] = vm->scratch_pt; | 1948 | ppgtt->base.pd.page_table[pde] = vm->scratch_pt; |
1991 | 1949 | ||
@@ -2237,23 +2195,10 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv) | |||
2237 | { | 2195 | { |
2238 | gtt_write_workarounds(dev_priv); | 2196 | gtt_write_workarounds(dev_priv); |
2239 | 2197 | ||
2240 | /* In the case of execlists, PPGTT is enabled by the context descriptor | ||
2241 | * and the PDPs are contained within the context itself. We don't | ||
2242 | * need to do anything here. */ | ||
2243 | if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) | ||
2244 | return 0; | ||
2245 | |||
2246 | if (!USES_PPGTT(dev_priv)) | ||
2247 | return 0; | ||
2248 | |||
2249 | if (IS_GEN6(dev_priv)) | 2198 | if (IS_GEN6(dev_priv)) |
2250 | gen6_ppgtt_enable(dev_priv); | 2199 | gen6_ppgtt_enable(dev_priv); |
2251 | else if (IS_GEN7(dev_priv)) | 2200 | else if (IS_GEN7(dev_priv)) |
2252 | gen7_ppgtt_enable(dev_priv); | 2201 | gen7_ppgtt_enable(dev_priv); |
2253 | else if (INTEL_GEN(dev_priv) >= 8) | ||
2254 | gen8_ppgtt_enable(dev_priv); | ||
2255 | else | ||
2256 | MISSING_CASE(INTEL_GEN(dev_priv)); | ||
2257 | 2202 | ||
2258 | return 0; | 2203 | return 0; |
2259 | } | 2204 | } |
@@ -2543,8 +2488,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, | |||
2543 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); | 2488 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
2544 | unsigned first_entry = start / I915_GTT_PAGE_SIZE; | 2489 | unsigned first_entry = start / I915_GTT_PAGE_SIZE; |
2545 | unsigned num_entries = length / I915_GTT_PAGE_SIZE; | 2490 | unsigned num_entries = length / I915_GTT_PAGE_SIZE; |
2546 | const gen8_pte_t scratch_pte = | 2491 | const gen8_pte_t scratch_pte = vm->scratch_pte; |
2547 | gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); | ||
2548 | gen8_pte_t __iomem *gtt_base = | 2492 | gen8_pte_t __iomem *gtt_base = |
2549 | (gen8_pte_t __iomem *)ggtt->gsm + first_entry; | 2493 | (gen8_pte_t __iomem *)ggtt->gsm + first_entry; |
2550 | const int max_entries = ggtt_total_entries(ggtt) - first_entry; | 2494 | const int max_entries = ggtt_total_entries(ggtt) - first_entry; |
@@ -2669,8 +2613,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, | |||
2669 | first_entry, num_entries, max_entries)) | 2613 | first_entry, num_entries, max_entries)) |
2670 | num_entries = max_entries; | 2614 | num_entries = max_entries; |
2671 | 2615 | ||
2672 | scratch_pte = vm->pte_encode(vm->scratch_page.daddr, | 2616 | scratch_pte = vm->scratch_pte; |
2673 | I915_CACHE_LLC, 0); | ||
2674 | 2617 | ||
2675 | for (i = 0; i < num_entries; i++) | 2618 | for (i = 0; i < num_entries; i++) |
2676 | iowrite32(scratch_pte, &gtt_base[i]); | 2619
@@ -2952,7 +2895,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) | |||
2952 | /* And finally clear the reserved guard page */ | 2895 | /* And finally clear the reserved guard page */ |
2953 | ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); | 2896 | ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); |
2954 | 2897 | ||
2955 | if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) { | 2898 | if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) { |
2956 | ret = i915_gem_init_aliasing_ppgtt(dev_priv); | 2899 | ret = i915_gem_init_aliasing_ppgtt(dev_priv); |
2957 | if (ret) | 2900 | if (ret) |
2958 | goto err; | 2901 | goto err; |
@@ -3076,6 +3019,10 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) | |||
3076 | return ret; | 3019 | return ret; |
3077 | } | 3020 | } |
3078 | 3021 | ||
3022 | ggtt->vm.scratch_pte = | ||
3023 | ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr, | ||
3024 | I915_CACHE_NONE, 0); | ||
3025 | |||
3079 | return 0; | 3026 | return 0; |
3080 | } | 3027 | } |
3081 | 3028 | ||
@@ -3275,7 +3222,7 @@ static void bdw_setup_private_ppat(struct intel_ppat *ppat) | |||
3275 | ppat->match = bdw_private_pat_match; | 3222 | ppat->match = bdw_private_pat_match; |
3276 | ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3); | 3223 | ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3); |
3277 | 3224 | ||
3278 | if (!USES_PPGTT(ppat->i915)) { | 3225 | if (!HAS_PPGTT(ppat->i915)) { |
3279 | /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, | 3226 | /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, |
3280 | * so RTL will always use the value corresponding to | 3227 | * so RTL will always use the value corresponding to |
3281 | * pat_sel = 000". | 3228 | * pat_sel = 000". |
@@ -3402,7 +3349,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) | |||
3402 | ggtt->vm.cleanup = gen6_gmch_remove; | 3349 | ggtt->vm.cleanup = gen6_gmch_remove; |
3403 | ggtt->vm.insert_page = gen8_ggtt_insert_page; | 3350 | ggtt->vm.insert_page = gen8_ggtt_insert_page; |
3404 | ggtt->vm.clear_range = nop_clear_range; | 3351 | ggtt->vm.clear_range = nop_clear_range; |
3405 | if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv)) | 3352 | if (intel_scanout_needs_vtd_wa(dev_priv)) |
3406 | ggtt->vm.clear_range = gen8_ggtt_clear_range; | 3353 | ggtt->vm.clear_range = gen8_ggtt_clear_range; |
3407 | 3354 | ||
3408 | ggtt->vm.insert_entries = gen8_ggtt_insert_entries; | 3355 | ggtt->vm.insert_entries = gen8_ggtt_insert_entries; |
@@ -3413,6 +3360,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) | |||
3413 | ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; | 3360 | ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; |
3414 | if (ggtt->vm.clear_range != nop_clear_range) | 3361 | if (ggtt->vm.clear_range != nop_clear_range) |
3415 | ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; | 3362 | ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; |
3363 | |||
3364 | /* Prevent recursively calling stop_machine() and deadlocks. */ | ||
3365 | dev_info(dev_priv->drm.dev, | ||
3366 | "Disabling error capture for VT-d workaround\n"); | ||
3367 | i915_disable_error_state(dev_priv, -ENODEV); | ||
3416 | } | 3368 | } |
3417 | 3369 | ||
3418 | ggtt->invalidate = gen6_ggtt_invalidate; | 3370 | ggtt->invalidate = gen6_ggtt_invalidate; |
@@ -3422,6 +3374,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) | |||
3422 | ggtt->vm.vma_ops.set_pages = ggtt_set_pages; | 3374 | ggtt->vm.vma_ops.set_pages = ggtt_set_pages; |
3423 | ggtt->vm.vma_ops.clear_pages = clear_pages; | 3375 | ggtt->vm.vma_ops.clear_pages = clear_pages; |
3424 | 3376 | ||
3377 | ggtt->vm.pte_encode = gen8_pte_encode; | ||
3378 | |||
3425 | setup_private_pat(dev_priv); | 3379 | setup_private_pat(dev_priv); |
3426 | 3380 | ||
3427 | return ggtt_probe_common(ggtt, size); | 3381 | return ggtt_probe_common(ggtt, size); |
@@ -3609,7 +3563,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) | |||
3609 | /* Only VLV supports read-only GGTT mappings */ | 3563 | /* Only VLV supports read-only GGTT mappings */ |
3610 | ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv); | 3564 | ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv); |
3611 | 3565 | ||
3612 | if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv)) | 3566 | if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv)) |
3613 | ggtt->vm.mm.color_adjust = i915_gtt_color_adjust; | 3567 | ggtt->vm.mm.color_adjust = i915_gtt_color_adjust; |
3614 | mutex_unlock(&dev_priv->drm.struct_mutex); | 3568 | mutex_unlock(&dev_priv->drm.struct_mutex); |
3615 | 3569 | ||
@@ -3711,7 +3665,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) | |||
3711 | } | 3665 | } |
3712 | 3666 | ||
3713 | static struct scatterlist * | 3667 | static struct scatterlist * |
3714 | rotate_pages(const dma_addr_t *in, unsigned int offset, | 3668 | rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset, |
3715 | unsigned int width, unsigned int height, | 3669 | unsigned int width, unsigned int height, |
3716 | unsigned int stride, | 3670 | unsigned int stride, |
3717 | struct sg_table *st, struct scatterlist *sg) | 3671 | struct sg_table *st, struct scatterlist *sg) |
@@ -3720,7 +3674,7 @@ rotate_pages(const dma_addr_t *in, unsigned int offset, | |||
3720 | unsigned int src_idx; | 3674 | unsigned int src_idx; |
3721 | 3675 | ||
3722 | for (column = 0; column < width; column++) { | 3676 | for (column = 0; column < width; column++) { |
3723 | src_idx = stride * (height - 1) + column; | 3677 | src_idx = stride * (height - 1) + column + offset; |
3724 | for (row = 0; row < height; row++) { | 3678 | for (row = 0; row < height; row++) { |
3725 | st->nents++; | 3679 | st->nents++; |
3726 | /* We don't need the pages, but need to initialize | 3680 | /* We don't need the pages, but need to initialize |
@@ -3728,7 +3682,8 @@ rotate_pages(const dma_addr_t *in, unsigned int offset, | |||
3728 | * The only thing we need are DMA addresses. | 3682 | * The only thing we need are DMA addresses. |
3729 | */ | 3683 | */ |
3730 | sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0); | 3684 | sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0); |
3731 | sg_dma_address(sg) = in[offset + src_idx]; | 3685 | sg_dma_address(sg) = |
3686 | i915_gem_object_get_dma_address(obj, src_idx); | ||
3732 | sg_dma_len(sg) = I915_GTT_PAGE_SIZE; | 3687 | sg_dma_len(sg) = I915_GTT_PAGE_SIZE; |
3733 | sg = sg_next(sg); | 3688 | sg = sg_next(sg); |
3734 | src_idx -= stride; | 3689 | src_idx -= stride; |
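rotate_pages() drops the temporary dma_addr_t array and asks i915_gem_object_get_dma_address() for each tile directly, walking source indices column by column from the bottom row. A standalone model of that index walk:

```c
#include <stdio.h>

/* Model of the rotated walk above: emit source tile indices column by
 * column, starting from the bottom row, exactly as rotate_pages() does.
 */
static void rotate_pages_model(unsigned int offset, unsigned int width,
			       unsigned int height, unsigned int stride)
{
	for (unsigned int column = 0; column < width; column++) {
		unsigned int src_idx = stride * (height - 1) + column + offset;

		for (unsigned int row = 0; row < height; row++) {
			/* here the driver would do:
			 * i915_gem_object_get_dma_address(obj, src_idx) */
			printf("%u ", src_idx);
			src_idx -= stride;
		}
	}
	printf("\n");
}

int main(void)
{
	rotate_pages_model(0, 4, 3, 4);	/* 4x3 tiles, stride 4 */
	return 0;
}
```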
@@ -3742,22 +3697,11 @@ static noinline struct sg_table * | |||
3742 | intel_rotate_pages(struct intel_rotation_info *rot_info, | 3697 | intel_rotate_pages(struct intel_rotation_info *rot_info, |
3743 | struct drm_i915_gem_object *obj) | 3698 | struct drm_i915_gem_object *obj) |
3744 | { | 3699 | { |
3745 | const unsigned long n_pages = obj->base.size / I915_GTT_PAGE_SIZE; | ||
3746 | unsigned int size = intel_rotation_info_size(rot_info); | 3700 | unsigned int size = intel_rotation_info_size(rot_info); |
3747 | struct sgt_iter sgt_iter; | ||
3748 | dma_addr_t dma_addr; | ||
3749 | unsigned long i; | ||
3750 | dma_addr_t *page_addr_list; | ||
3751 | struct sg_table *st; | 3701 | struct sg_table *st; |
3752 | struct scatterlist *sg; | 3702 | struct scatterlist *sg; |
3753 | int ret = -ENOMEM; | 3703 | int ret = -ENOMEM; |
3754 | 3704 | int i; | |
3755 | /* Allocate a temporary list of source pages for random access. */ | ||
3756 | page_addr_list = kvmalloc_array(n_pages, | ||
3757 | sizeof(dma_addr_t), | ||
3758 | GFP_KERNEL); | ||
3759 | if (!page_addr_list) | ||
3760 | return ERR_PTR(ret); | ||
3761 | 3705 | ||
3762 | /* Allocate target SG list. */ | 3706 | /* Allocate target SG list. */ |
3763 | st = kmalloc(sizeof(*st), GFP_KERNEL); | 3707 | st = kmalloc(sizeof(*st), GFP_KERNEL); |
@@ -3768,29 +3712,20 @@ intel_rotate_pages(struct intel_rotation_info *rot_info, | |||
3768 | if (ret) | 3712 | if (ret) |
3769 | goto err_sg_alloc; | 3713 | goto err_sg_alloc; |
3770 | 3714 | ||
3771 | /* Populate source page list from the object. */ | ||
3772 | i = 0; | ||
3773 | for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages) | ||
3774 | page_addr_list[i++] = dma_addr; | ||
3775 | |||
3776 | GEM_BUG_ON(i != n_pages); | ||
3777 | st->nents = 0; | 3715 | st->nents = 0; |
3778 | sg = st->sgl; | 3716 | sg = st->sgl; |
3779 | 3717 | ||
3780 | for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) { | 3718 | for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) { |
3781 | sg = rotate_pages(page_addr_list, rot_info->plane[i].offset, | 3719 | sg = rotate_pages(obj, rot_info->plane[i].offset, |
3782 | rot_info->plane[i].width, rot_info->plane[i].height, | 3720 | rot_info->plane[i].width, rot_info->plane[i].height, |
3783 | rot_info->plane[i].stride, st, sg); | 3721 | rot_info->plane[i].stride, st, sg); |
3784 | } | 3722 | } |
3785 | 3723 | ||
3786 | kvfree(page_addr_list); | ||
3787 | |||
3788 | return st; | 3724 | return st; |
3789 | 3725 | ||
3790 | err_sg_alloc: | 3726 | err_sg_alloc: |
3791 | kfree(st); | 3727 | kfree(st); |
3792 | err_st_alloc: | 3728 | err_st_alloc: |
3793 | kvfree(page_addr_list); | ||
3794 | 3729 | ||
3795 | DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n", | 3730 | DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n", |
3796 | obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size); | 3731 | obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size); |
@@ -3835,6 +3770,8 @@ intel_partial_pages(const struct i915_ggtt_view *view, | |||
3835 | count -= len >> PAGE_SHIFT; | 3770 | count -= len >> PAGE_SHIFT; |
3836 | if (count == 0) { | 3771 | if (count == 0) { |
3837 | sg_mark_end(sg); | 3772 | sg_mark_end(sg); |
3773 | i915_sg_trim(st); /* Drop any unused tail entries. */ | ||
3774 | |||
3838 | return st; | 3775 | return st; |
3839 | } | 3776 | } |
3840 | 3777 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 7e2af5f4f39b..4874da09a3c4 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h | |||
@@ -42,13 +42,15 @@ | |||
42 | #include "i915_selftest.h" | 42 | #include "i915_selftest.h" |
43 | #include "i915_timeline.h" | 43 | #include "i915_timeline.h" |
44 | 44 | ||
45 | #define I915_GTT_PAGE_SIZE_4K BIT(12) | 45 | #define I915_GTT_PAGE_SIZE_4K BIT_ULL(12) |
46 | #define I915_GTT_PAGE_SIZE_64K BIT(16) | 46 | #define I915_GTT_PAGE_SIZE_64K BIT_ULL(16) |
47 | #define I915_GTT_PAGE_SIZE_2M BIT(21) | 47 | #define I915_GTT_PAGE_SIZE_2M BIT_ULL(21) |
48 | 48 | ||
49 | #define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K | 49 | #define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K |
50 | #define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M | 50 | #define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M |
51 | 51 | ||
52 | #define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE | ||
53 | |||
52 | #define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE | 54 | #define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE |
53 | 55 | ||
54 | #define I915_FENCE_REG_NONE -1 | 56 | #define I915_FENCE_REG_NONE -1 |
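I915_GTT_PAGE_MASK is defined as the negation of the page size: for a power-of-two size, two's complement yields all-ones above the page-offset bits, and the switch to BIT_ULL() keeps the arithmetic 64-bit even on 32-bit builds. A quick demonstration:

```c
#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)		(1ULL << (n))
#define I915_GTT_PAGE_SIZE	BIT_ULL(12)
#define I915_GTT_PAGE_MASK	(-I915_GTT_PAGE_SIZE)	/* == ~0xfffULL */

int main(void)
{
	uint64_t offset = 0x1234567;

	/* Two's complement: -(2^12) sets every bit above the page offset. */
	printf("mask = 0x%llx\n", (unsigned long long)I915_GTT_PAGE_MASK);
	printf("page = 0x%llx\n",
	       (unsigned long long)(offset & I915_GTT_PAGE_MASK));
	return 0;
}
```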
@@ -287,6 +289,7 @@ struct i915_address_space { | |||
287 | 289 | ||
288 | struct mutex mutex; /* protects vma and our lists */ | 290 | struct mutex mutex; /* protects vma and our lists */ |
289 | 291 | ||
292 | u64 scratch_pte; | ||
290 | struct i915_page_dma scratch_page; | 293 | struct i915_page_dma scratch_page; |
291 | struct i915_page_table *scratch_pt; | 294 | struct i915_page_table *scratch_pt; |
292 | struct i915_page_directory *scratch_pd; | 295 | struct i915_page_directory *scratch_pd; |
@@ -333,12 +336,11 @@ struct i915_address_space { | |||
333 | /* Some systems support read-only mappings for GGTT and/or PPGTT */ | 336 | /* Some systems support read-only mappings for GGTT and/or PPGTT */ |
334 | bool has_read_only:1; | 337 | bool has_read_only:1; |
335 | 338 | ||
336 | /* FIXME: Need a more generic return type */ | 339 | u64 (*pte_encode)(dma_addr_t addr, |
337 | gen6_pte_t (*pte_encode)(dma_addr_t addr, | 340 | enum i915_cache_level level, |
338 | enum i915_cache_level level, | 341 | u32 flags); /* Create a valid PTE */ |
339 | u32 flags); /* Create a valid PTE */ | ||
340 | /* flags for pte_encode */ | ||
341 | #define PTE_READ_ONLY (1<<0) | 342 | #define PTE_READ_ONLY (1<<0) |
343 | |||
342 | int (*allocate_va_range)(struct i915_address_space *vm, | 344 | int (*allocate_va_range)(struct i915_address_space *vm, |
343 | u64 start, u64 length); | 345 | u64 start, u64 length); |
344 | void (*clear_range)(struct i915_address_space *vm, | 346 | void (*clear_range)(struct i915_address_space *vm, |
@@ -420,7 +422,6 @@ struct gen6_hw_ppgtt { | |||
420 | 422 | ||
421 | struct i915_vma *vma; | 423 | struct i915_vma *vma; |
422 | gen6_pte_t __iomem *pd_addr; | 424 | gen6_pte_t __iomem *pd_addr; |
423 | gen6_pte_t scratch_pte; | ||
424 | 425 | ||
425 | unsigned int pin_count; | 426 | unsigned int pin_count; |
426 | bool scan_for_unused_pt; | 427 | bool scan_for_unused_pt; |
@@ -659,20 +660,20 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, | |||
659 | u64 start, u64 end, unsigned int flags); | 660 | u64 start, u64 end, unsigned int flags); |
660 | 661 | ||
661 | /* Flags used by pin/bind&friends. */ | 662 | /* Flags used by pin/bind&friends. */ |
662 | #define PIN_NONBLOCK BIT(0) | 663 | #define PIN_NONBLOCK BIT_ULL(0) |
663 | #define PIN_MAPPABLE BIT(1) | 664 | #define PIN_MAPPABLE BIT_ULL(1) |
664 | #define PIN_ZONE_4G BIT(2) | 665 | #define PIN_ZONE_4G BIT_ULL(2) |
665 | #define PIN_NONFAULT BIT(3) | 666 | #define PIN_NONFAULT BIT_ULL(3) |
666 | #define PIN_NOEVICT BIT(4) | 667 | #define PIN_NOEVICT BIT_ULL(4) |
667 | 668 | ||
668 | #define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */ | 669 | #define PIN_MBZ BIT_ULL(5) /* I915_VMA_PIN_OVERFLOW */ |
669 | #define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */ | 670 | #define PIN_GLOBAL BIT_ULL(6) /* I915_VMA_GLOBAL_BIND */ |
670 | #define PIN_USER BIT(7) /* I915_VMA_LOCAL_BIND */ | 671 | #define PIN_USER BIT_ULL(7) /* I915_VMA_LOCAL_BIND */ |
671 | #define PIN_UPDATE BIT(8) | 672 | #define PIN_UPDATE BIT_ULL(8) |
672 | 673 | ||
673 | #define PIN_HIGH BIT(9) | 674 | #define PIN_HIGH BIT_ULL(9) |
674 | #define PIN_OFFSET_BIAS BIT(10) | 675 | #define PIN_OFFSET_BIAS BIT_ULL(10) |
675 | #define PIN_OFFSET_FIXED BIT(11) | 676 | #define PIN_OFFSET_FIXED BIT_ULL(11) |
676 | #define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE) | 677 | #define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE) |
677 | 678 | ||
678 | #endif | 679 | #endif |
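The PIN_* flags switch from BIT() to BIT_ULL() because they are combined into u64 flag words alongside PIN_OFFSET_MASK; plain BIT() produces an unsigned long, which is only 32 bits wide on 32-bit kernels. A small sketch of the type difference:

```c
#include <stdio.h>
#include <stdint.h>

#define BIT(n)		(1UL << (n))	/* unsigned long: 32-bit on ILP32 */
#define BIT_ULL(n)	(1ULL << (n))	/* always 64-bit */

int main(void)
{
	/* BIT_ULL keeps every flag the same 64-bit type as the flags word,
	 * so combining with a 64-bit mask can never truncate, and flags
	 * above bit 31 remain representable on 32-bit builds.
	 */
	uint64_t flags = BIT_ULL(9) | BIT_ULL(10);

	printf("flags=%#llx, sizeof(BIT(0))=%zu, sizeof(BIT_ULL(0))=%zu\n",
	       (unsigned long long)flags, sizeof(BIT(0)), sizeof(BIT_ULL(0)));
	return 0;
}
```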
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 8762d17b6659..8123bf0e4807 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c | |||
@@ -27,7 +27,7 @@ | |||
27 | * | 27 | * |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #include <generated/utsrelease.h> | 30 | #include <linux/utsname.h> |
31 | #include <linux/stop_machine.h> | 31 | #include <linux/stop_machine.h> |
32 | #include <linux/zlib.h> | 32 | #include <linux/zlib.h> |
33 | #include <drm/drm_print.h> | 33 | #include <drm/drm_print.h> |
@@ -512,7 +512,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, | |||
512 | err_printf(m, " SYNC_2: 0x%08x\n", | 512 | err_printf(m, " SYNC_2: 0x%08x\n", |
513 | ee->semaphore_mboxes[2]); | 513 | ee->semaphore_mboxes[2]); |
514 | } | 514 | } |
515 | if (USES_PPGTT(m->i915)) { | 515 | if (HAS_PPGTT(m->i915)) { |
516 | err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode); | 516 | err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode); |
517 | 517 | ||
518 | if (INTEL_GEN(m->i915) >= 8) { | 518 | if (INTEL_GEN(m->i915) >= 8) { |
@@ -648,9 +648,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | |||
648 | return 0; | 648 | return 0; |
649 | } | 649 | } |
650 | 650 | ||
651 | if (IS_ERR(error)) | ||
652 | return PTR_ERR(error); | ||
653 | |||
651 | if (*error->error_msg) | 654 | if (*error->error_msg) |
652 | err_printf(m, "%s\n", error->error_msg); | 655 | err_printf(m, "%s\n", error->error_msg); |
653 | err_printf(m, "Kernel: " UTS_RELEASE "\n"); | 656 | err_printf(m, "Kernel: %s\n", init_utsname()->release); |
654 | ts = ktime_to_timespec64(error->time); | 657 | ts = ktime_to_timespec64(error->time); |
655 | err_printf(m, "Time: %lld s %ld us\n", | 658 | err_printf(m, "Time: %lld s %ld us\n", |
656 | (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC); | 659 | (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC); |
@@ -999,7 +1002,6 @@ i915_error_object_create(struct drm_i915_private *i915, | |||
999 | } | 1002 | } |
1000 | 1003 | ||
1001 | compress_fini(&compress, dst); | 1004 | compress_fini(&compress, dst); |
1002 | ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); | ||
1003 | return dst; | 1005 | return dst; |
1004 | } | 1006 | } |
1005 | 1007 | ||
@@ -1268,7 +1270,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error, | |||
1268 | ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error, | 1270 | ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error, |
1269 | engine); | 1271 | engine); |
1270 | 1272 | ||
1271 | if (USES_PPGTT(dev_priv)) { | 1273 | if (HAS_PPGTT(dev_priv)) { |
1272 | int i; | 1274 | int i; |
1273 | 1275 | ||
1274 | ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); | 1276 | ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); |
@@ -1785,6 +1787,14 @@ static unsigned long capture_find_epoch(const struct i915_gpu_state *error) | |||
1785 | return epoch; | 1787 | return epoch; |
1786 | } | 1788 | } |
1787 | 1789 | ||
1790 | static void capture_finish(struct i915_gpu_state *error) | ||
1791 | { | ||
1792 | struct i915_ggtt *ggtt = &error->i915->ggtt; | ||
1793 | const u64 slot = ggtt->error_capture.start; | ||
1794 | |||
1795 | ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); | ||
1796 | } | ||
1797 | |||
1788 | static int capture(void *data) | 1798 | static int capture(void *data) |
1789 | { | 1799 | { |
1790 | struct i915_gpu_state *error = data; | 1800 | struct i915_gpu_state *error = data; |
@@ -1809,6 +1819,7 @@ static int capture(void *data) | |||
1809 | 1819 | ||
1810 | error->epoch = capture_find_epoch(error); | 1820 | error->epoch = capture_find_epoch(error); |
1811 | 1821 | ||
1822 | capture_finish(error); | ||
1812 | return 0; | 1823 | return 0; |
1813 | } | 1824 | } |
1814 | 1825 | ||
@@ -1859,6 +1870,7 @@ void i915_capture_error_state(struct drm_i915_private *i915, | |||
1859 | error = i915_capture_gpu_state(i915); | 1870 | error = i915_capture_gpu_state(i915); |
1860 | if (!error) { | 1871 | if (!error) { |
1861 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); | 1872 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); |
1873 | i915_disable_error_state(i915, -ENOMEM); | ||
1862 | return; | 1874 | return; |
1863 | } | 1875 | } |
1864 | 1876 | ||
@@ -1914,5 +1926,14 @@ void i915_reset_error_state(struct drm_i915_private *i915) | |||
1914 | i915->gpu_error.first_error = NULL; | 1926 | i915->gpu_error.first_error = NULL; |
1915 | spin_unlock_irq(&i915->gpu_error.lock); | 1927 | spin_unlock_irq(&i915->gpu_error.lock); |
1916 | 1928 | ||
1917 | i915_gpu_state_put(error); | 1929 | if (!IS_ERR(error)) |
1930 | i915_gpu_state_put(error); | ||
1931 | } | ||
1932 | |||
1933 | void i915_disable_error_state(struct drm_i915_private *i915, int err) | ||
1934 | { | ||
1935 | spin_lock_irq(&i915->gpu_error.lock); | ||
1936 | if (!i915->gpu_error.first_error) | ||
1937 | i915->gpu_error.first_error = ERR_PTR(err); | ||
1938 | spin_unlock_irq(&i915->gpu_error.lock); | ||
1918 | } | 1939 | } |
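i915_disable_error_state() parks an ERR_PTR() in gpu_error.first_error, so "capture disabled" is distinguishable from both NULL (no error yet) and a real capture, and readers such as i915_reset_error_state() must now test IS_ERR() first. A userspace model of the sentinel, with the ERR_PTR helpers re-derived from their well-known kernel definitions:

```c
#include <stdio.h>
#include <errno.h>

/* Userspace model of the ERR_PTR sentinel: an errno encoded as an
 * impossible pointer value in the top page of the address space.
 */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *first_error;	/* models i915->gpu_error.first_error */

static void disable_error_state(int err)
{
	if (!first_error)	/* don't clobber a real capture */
		first_error = ERR_PTR(err);
}

int main(void)
{
	disable_error_state(-ENODEV);

	if (IS_ERR(first_error))	/* readers must check before use/put */
		printf("capture disabled: %ld\n", PTR_ERR(first_error));
	return 0;
}
```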
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 8710fb18ed74..3ec89a504de5 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h | |||
@@ -343,6 +343,7 @@ static inline void i915_gpu_state_put(struct i915_gpu_state *gpu) | |||
343 | 343 | ||
344 | struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915); | 344 | struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915); |
345 | void i915_reset_error_state(struct drm_i915_private *i915); | 345 | void i915_reset_error_state(struct drm_i915_private *i915); |
346 | void i915_disable_error_state(struct drm_i915_private *i915, int err); | ||
346 | 347 | ||
347 | #else | 348 | #else |
348 | 349 | ||
@@ -355,13 +356,18 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv, | |||
355 | static inline struct i915_gpu_state * | 356 | static inline struct i915_gpu_state * |
356 | i915_first_error_state(struct drm_i915_private *i915) | 357 | i915_first_error_state(struct drm_i915_private *i915) |
357 | { | 358 | { |
358 | return NULL; | 359 | return ERR_PTR(-ENODEV); |
359 | } | 360 | } |
360 | 361 | ||
361 | static inline void i915_reset_error_state(struct drm_i915_private *i915) | 362 | static inline void i915_reset_error_state(struct drm_i915_private *i915) |
362 | { | 363 | { |
363 | } | 364 | } |
364 | 365 | ||
366 | static inline void i915_disable_error_state(struct drm_i915_private *i915, | ||
367 | int err) | ||
368 | { | ||
369 | } | ||
370 | |||
365 | #endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */ | 371 | #endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */ |
366 | 372 | ||
367 | #endif /* _I915_GPU_ERROR_H_ */ | 373 | #endif /* _I915_GPU_ERROR_H_ */ |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2e242270e270..d447d7d508f4 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -2887,21 +2887,39 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) | |||
2887 | return ret; | 2887 | return ret; |
2888 | } | 2888 | } |
2889 | 2889 | ||
2890 | static inline u32 gen8_master_intr_disable(void __iomem * const regs) | ||
2891 | { | ||
2892 | raw_reg_write(regs, GEN8_MASTER_IRQ, 0); | ||
2893 | |||
2894 | /* | ||
2895 | * Now with master disabled, get a sample of level indications | ||
2896 | * for this interrupt. Indications will be cleared on related acks. | ||
2897 | * New indications can and will light up during processing, | ||
2898 | * and will generate a new interrupt after enabling master. | ||
2899 | */ | ||
2900 | return raw_reg_read(regs, GEN8_MASTER_IRQ); | ||
2901 | } | ||
2902 | |||
2903 | static inline void gen8_master_intr_enable(void __iomem * const regs) | ||
2904 | { | ||
2905 | raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); | ||
2906 | } | ||
2907 | |||
2890 | static irqreturn_t gen8_irq_handler(int irq, void *arg) | 2908 | static irqreturn_t gen8_irq_handler(int irq, void *arg) |
2891 | { | 2909 | { |
2892 | struct drm_i915_private *dev_priv = to_i915(arg); | 2910 | struct drm_i915_private *dev_priv = to_i915(arg); |
2911 | void __iomem * const regs = dev_priv->regs; | ||
2893 | u32 master_ctl; | 2912 | u32 master_ctl; |
2894 | u32 gt_iir[4]; | 2913 | u32 gt_iir[4]; |
2895 | 2914 | ||
2896 | if (!intel_irqs_enabled(dev_priv)) | 2915 | if (!intel_irqs_enabled(dev_priv)) |
2897 | return IRQ_NONE; | 2916 | return IRQ_NONE; |
2898 | 2917 | ||
2899 | master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); | 2918 | master_ctl = gen8_master_intr_disable(regs); |
2900 | master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; | 2919 | if (!master_ctl) { |
2901 | if (!master_ctl) | 2920 | gen8_master_intr_enable(regs); |
2902 | return IRQ_NONE; | 2921 | return IRQ_NONE; |
2903 | 2922 | } | |
2904 | I915_WRITE_FW(GEN8_MASTER_IRQ, 0); | ||
2905 | 2923 | ||
2906 | /* Find, clear, then process each source of interrupt */ | 2924 | /* Find, clear, then process each source of interrupt */ |
2907 | gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); | 2925 | gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); |
@@ -2913,7 +2931,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) | |||
2913 | enable_rpm_wakeref_asserts(dev_priv); | 2931 | enable_rpm_wakeref_asserts(dev_priv); |
2914 | } | 2932 | } |
2915 | 2933 | ||
2916 | I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); | 2934 | gen8_master_intr_enable(regs); |
2917 | 2935 | ||
2918 | gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); | 2936 | gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); |
2919 | 2937 | ||
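The new gen8/gen11 helpers disable the master interrupt control, sample the level indications while it is off, handle and ack each source, then re-enable; anything that lights up during processing re-asserts the interrupt afterwards, so no edge is lost. A minimal model of that sequence:

```c
#include <stdio.h>

/* Userspace model of the gen8/gen11 flow above: disable the master
 * control, sample the level-triggered sources, handle them, re-enable.
 * Sources raised while handling re-assert the interrupt afterwards.
 */
static unsigned int master_irq = 1;	/* models GEN8_MASTER_IRQ_CONTROL */
static unsigned int pending = 0x5;	/* models per-source level bits */

static unsigned int master_intr_disable(void)
{
	master_irq = 0;			/* raw_reg_write(..., 0) */
	return pending;			/* sample with master off */
}

static void master_intr_enable(void)
{
	master_irq = 1;			/* raw_reg_write(..., CONTROL) */
}

int main(void)
{
	unsigned int master_ctl = master_intr_disable();

	if (!master_ctl) {
		master_intr_enable();
		return 0;		/* IRQ_NONE */
	}

	printf("handling sources 0x%x\n", master_ctl);
	pending = 0;			/* acks clear the sampled bits */
	master_intr_enable();
	return 0;
}
```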
@@ -3111,6 +3129,24 @@ gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir) | |||
3111 | intel_opregion_asle_intr(dev_priv); | 3129 | intel_opregion_asle_intr(dev_priv); |
3112 | } | 3130 | } |
3113 | 3131 | ||
3132 | static inline u32 gen11_master_intr_disable(void __iomem * const regs) | ||
3133 | { | ||
3134 | raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); | ||
3135 | |||
3136 | /* | ||
3137 | * Now with master disabled, get a sample of level indications | ||
3138 | * for this interrupt. Indications will be cleared on related acks. | ||
3139 | * New indications can and will light up during processing, | ||
3140 | * and will generate a new interrupt after enabling master. | ||
3141 | */ | ||
3142 | return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); | ||
3143 | } | ||
3144 | |||
3145 | static inline void gen11_master_intr_enable(void __iomem * const regs) | ||
3146 | { | ||
3147 | raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); | ||
3148 | } | ||
3149 | |||
3114 | static irqreturn_t gen11_irq_handler(int irq, void *arg) | 3150 | static irqreturn_t gen11_irq_handler(int irq, void *arg) |
3115 | { | 3151 | { |
3116 | struct drm_i915_private * const i915 = to_i915(arg); | 3152 | struct drm_i915_private * const i915 = to_i915(arg); |
@@ -3121,13 +3157,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) | |||
3121 | if (!intel_irqs_enabled(i915)) | 3157 | if (!intel_irqs_enabled(i915)) |
3122 | return IRQ_NONE; | 3158 | return IRQ_NONE; |
3123 | 3159 | ||
3124 | master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); | 3160 | master_ctl = gen11_master_intr_disable(regs); |
3125 | master_ctl &= ~GEN11_MASTER_IRQ; | 3161 | if (!master_ctl) { |
3126 | if (!master_ctl) | 3162 | gen11_master_intr_enable(regs); |
3127 | return IRQ_NONE; | 3163 | return IRQ_NONE; |
3128 | 3164 | } | |
3129 | /* Disable interrupts. */ | ||
3130 | raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); | ||
3131 | 3165 | ||
3132 | /* Find, clear, then process each source of interrupt. */ | 3166 | /* Find, clear, then process each source of interrupt. */ |
3133 | gen11_gt_irq_handler(i915, master_ctl); | 3167 | gen11_gt_irq_handler(i915, master_ctl); |
@@ -3147,8 +3181,7 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) | |||
3147 | 3181 | ||
3148 | gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); | 3182 | gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); |
3149 | 3183 | ||
3150 | /* Acknowledge and enable interrupts. */ | 3184 | gen11_master_intr_enable(regs); |
3151 | raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl); | ||
3152 | 3185 | ||
3153 | gen11_gu_misc_irq_handler(i915, gu_misc_iir); | 3186 | gen11_gu_misc_irq_handler(i915, gu_misc_iir); |
3154 | 3187 | ||
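
Editor's note: gen11 follows the same shape, with one ordering detail worth noting. gen11_gu_misc_irq_ack() reads and clears the GU_MISC IIR while the master is still disabled, the master is then re-enabled, and only afterwards is the saved IIR value handed to gen11_gu_misc_irq_handler(). Acking before the re-enable means a GU_MISC event arriving mid-processing simply latches and raises a new interrupt; processing the saved copy after the re-enable presumably keeps the fully-masked window as short as possible.
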
@@ -3598,8 +3631,7 @@ static void gen8_irq_reset(struct drm_device *dev) | |||
3598 | struct drm_i915_private *dev_priv = to_i915(dev); | 3631 | struct drm_i915_private *dev_priv = to_i915(dev); |
3599 | int pipe; | 3632 | int pipe; |
3600 | 3633 | ||
3601 | I915_WRITE(GEN8_MASTER_IRQ, 0); | 3634 | gen8_master_intr_disable(dev_priv->regs); |
3602 | POSTING_READ(GEN8_MASTER_IRQ); | ||
3603 | 3635 | ||
3604 | gen8_gt_irq_reset(dev_priv); | 3636 | gen8_gt_irq_reset(dev_priv); |
3605 | 3637 | ||
@@ -3641,13 +3673,15 @@ static void gen11_irq_reset(struct drm_device *dev) | |||
3641 | struct drm_i915_private *dev_priv = dev->dev_private; | 3673 | struct drm_i915_private *dev_priv = dev->dev_private; |
3642 | int pipe; | 3674 | int pipe; |
3643 | 3675 | ||
3644 | I915_WRITE(GEN11_GFX_MSTR_IRQ, 0); | 3676 | gen11_master_intr_disable(dev_priv->regs); |
3645 | POSTING_READ(GEN11_GFX_MSTR_IRQ); | ||
3646 | 3677 | ||
3647 | gen11_gt_irq_reset(dev_priv); | 3678 | gen11_gt_irq_reset(dev_priv); |
3648 | 3679 | ||
3649 | I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); | 3680 | I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); |
3650 | 3681 | ||
3682 | I915_WRITE(EDP_PSR_IMR, 0xffffffff); | ||
3683 | I915_WRITE(EDP_PSR_IIR, 0xffffffff); | ||
3684 | |||
3651 | for_each_pipe(dev_priv, pipe) | 3685 | for_each_pipe(dev_priv, pipe) |
3652 | if (intel_display_power_is_enabled(dev_priv, | 3686 | if (intel_display_power_is_enabled(dev_priv, |
3653 | POWER_DOMAIN_PIPE(pipe))) | 3687 | POWER_DOMAIN_PIPE(pipe))) |
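
Editor's note: besides switching to gen11_master_intr_disable() (whose trailing read again stands in for the dropped POSTING_READ), the gen11 reset now masks and clears the EDP PSR interrupt: writing all-ones to EDP_PSR_IMR masks every source, and writing all-ones to EDP_PSR_IIR drops any latched status, i915 IIR bits being write-one-to-clear. A hypothetical helper capturing that mask-then-clear idiom, using the raw_reg_write() accessor seen above (the helper itself is not part of this patch and assumes the i915 i915_reg_t types):

	static void intr_mask_and_clear(void __iomem * const regs,
					i915_reg_t imr, i915_reg_t iir)
	{
		raw_reg_write(regs, imr, 0xffffffff);	/* mask every source */
		raw_reg_write(regs, iir, 0xffffffff);	/* W1C: drop latched status */
	}
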
@@ -4244,8 +4278,7 @@ static int gen8_irq_postinstall(struct drm_device *dev) | |||
4244 | if (HAS_PCH_SPLIT(dev_priv)) | 4278 | if (HAS_PCH_SPLIT(dev_priv)) |
4245 | ibx_irq_postinstall(dev); | 4279 | ibx_irq_postinstall(dev); |
4246 | 4280 | ||
4247 | I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); | 4281 | gen8_master_intr_enable(dev_priv->regs); |
4248 | POSTING_READ(GEN8_MASTER_IRQ); | ||
4249 | 4282 | ||
4250 | return 0; | 4283 | return 0; |
4251 | } | 4284 | } |
@@ -4307,8 +4340,7 @@ static int gen11_irq_postinstall(struct drm_device *dev) | |||
4307 | 4340 | ||
4308 | I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); | 4341 | I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); |
4309 | 4342 | ||
4310 | I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); | 4343 | gen11_master_intr_enable(dev_priv->regs); |
4311 | POSTING_READ(GEN11_GFX_MSTR_IRQ); | ||
4312 | 4344 | ||
4313 | return 0; | 4345 | return 0; |
4314 | } | 4346 | } |
@@ -4834,6 +4866,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv) | |||
4834 | dev_priv->display_irqs_enabled = false; | 4866 | dev_priv->display_irqs_enabled = false; |
4835 | 4867 | ||
4836 | dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; | 4868 | dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; |
4869 | /* If we have MST support, we want to avoid doing short HPD IRQ storm | ||
4870 | * detection, as short HPD storms will occur as a natural part of | ||
4871 | * sideband messaging with MST. | ||
4872 | * On older platforms, however, IRQ storms can occur with both long and | ||
4873 | * short pulses, as seen on some G4x systems. | ||
4874 | */ | ||
4875 | dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv); | ||
4837 | 4876 | ||
4838 | dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; | 4877 | dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; |
4839 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; | 4878 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; |
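
Editor's note: the new hpd_short_storm_enabled flag turns short-pulse HPD storm accounting off whenever the platform has MST support, since MST sideband traffic legitimately generates bursts of short pulses. A hypothetical predicate showing how such a flag could gate the storm counter (the actual detection logic lives elsewhere in i915_irq.c; this helper is invented for illustration):

	static bool hpd_pulse_counts_toward_storm(struct drm_i915_private *dev_priv,
						  bool long_hpd)
	{
		/* Long pulses always count; short pulses only count when
		 * short-storm detection is enabled, i.e. on non-MST platforms. */
		return long_hpd || dev_priv->hotplug.hpd_short_storm_enabled;
	}
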
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.c b/drivers/gpu/drm/i915/i915_oa_bdw.c index 4abd2e8b5083..4acdb94555b7 100644 --- a/drivers/gpu/drm/i915/i915_oa_bdw.c +++ b/drivers/gpu/drm/i915/i915_oa_bdw.c | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #include <linux/sysfs.h> | 10 | #include <linux/sysfs.h> |
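
Editor's note: from here on, the remaining i915_oa_* hunks are mechanical. In each file the 25-line MIT boilerplate is replaced by the SPDX-License-Identifier tag, the copyright year is bumped to 2018, and the GPU Top autogeneration notice is retained. The resulting header, taken verbatim from the right-hand side of the diff above:

	/*
	 * SPDX-License-Identifier: MIT
	 *
	 * Copyright © 2018 Intel Corporation
	 *
	 * Autogenerated file by GPU Top : https://github.com/rib/gputop
	 * DO NOT EDIT manually!
	 */
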
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.h b/drivers/gpu/drm/i915/i915_oa_bdw.h index b812d16162ac..0e667f1a8aa1 100644 --- a/drivers/gpu/drm/i915/i915_oa_bdw.h +++ b/drivers/gpu/drm/i915/i915_oa_bdw.h | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #ifndef __I915_OA_BDW_H__ | 10 | #ifndef __I915_OA_BDW_H__ |
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.c b/drivers/gpu/drm/i915/i915_oa_bxt.c index cb6f304ec16a..a44195c39923 100644 --- a/drivers/gpu/drm/i915/i915_oa_bxt.c +++ b/drivers/gpu/drm/i915/i915_oa_bxt.c | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #include <linux/sysfs.h> | 10 | #include <linux/sysfs.h> |
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.h b/drivers/gpu/drm/i915/i915_oa_bxt.h index 690b963a2383..679e92cf4f1d 100644 --- a/drivers/gpu/drm/i915/i915_oa_bxt.h +++ b/drivers/gpu/drm/i915/i915_oa_bxt.h | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #ifndef __I915_OA_BXT_H__ | 10 | #ifndef __I915_OA_BXT_H__ |
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/i915_oa_cflgt2.c index 8641ae30e343..7f60d51b8761 100644 --- a/drivers/gpu/drm/i915/i915_oa_cflgt2.c +++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.c | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #include <linux/sysfs.h> | 10 | #include <linux/sysfs.h> |
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/i915_oa_cflgt2.h index 1f3268ef2ea2..4d6025559bbe 100644 --- a/drivers/gpu/drm/i915/i915_oa_cflgt2.h +++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.h | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #ifndef __I915_OA_CFLGT2_H__ | 10 | #ifndef __I915_OA_CFLGT2_H__ |
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c index 792facdb6702..a92c38e3a0ce 100644 --- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c +++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #include <linux/sysfs.h> | 10 | #include <linux/sysfs.h> |
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/i915_oa_cflgt3.h index c13b5aac01b9..0697f4077402 100644 --- a/drivers/gpu/drm/i915/i915_oa_cflgt3.h +++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.h | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #ifndef __I915_OA_CFLGT3_H__ | 10 | #ifndef __I915_OA_CFLGT3_H__ |
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.c b/drivers/gpu/drm/i915/i915_oa_chv.c index 556febb2c3c8..71ec889a0114 100644 --- a/drivers/gpu/drm/i915/i915_oa_chv.c +++ b/drivers/gpu/drm/i915/i915_oa_chv.c | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #include <linux/sysfs.h> | 10 | #include <linux/sysfs.h> |
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.h b/drivers/gpu/drm/i915/i915_oa_chv.h index b9622496979e..0986eae3135f 100644 --- a/drivers/gpu/drm/i915/i915_oa_chv.h +++ b/drivers/gpu/drm/i915/i915_oa_chv.h | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #ifndef __I915_OA_CHV_H__ | 10 | #ifndef __I915_OA_CHV_H__ |
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c index ba9140c87cc0..5c23d883d6c9 100644 --- a/drivers/gpu/drm/i915/i915_oa_cnl.c +++ b/drivers/gpu/drm/i915/i915_oa_cnl.c | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #include <linux/sysfs.h> | 10 | #include <linux/sysfs.h> |
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.h b/drivers/gpu/drm/i915/i915_oa_cnl.h index fb918b131105..e830a406aff2 100644 --- a/drivers/gpu/drm/i915/i915_oa_cnl.h +++ b/drivers/gpu/drm/i915/i915_oa_cnl.h | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #ifndef __I915_OA_CNL_H__ | 10 | #ifndef __I915_OA_CNL_H__ |
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.c b/drivers/gpu/drm/i915/i915_oa_glk.c index 971db587957c..4bdda66df7d2 100644 --- a/drivers/gpu/drm/i915/i915_oa_glk.c +++ b/drivers/gpu/drm/i915/i915_oa_glk.c | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #include <linux/sysfs.h> | 10 | #include <linux/sysfs.h> |
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.h b/drivers/gpu/drm/i915/i915_oa_glk.h index 63bd113f4bc9..06dedf991edb 100644 --- a/drivers/gpu/drm/i915/i915_oa_glk.h +++ b/drivers/gpu/drm/i915/i915_oa_glk.h | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #ifndef __I915_OA_GLK_H__ | 10 | #ifndef __I915_OA_GLK_H__ |
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c index 434a9b96d7ab..cc6526fdd2bd 100644 --- a/drivers/gpu/drm/i915/i915_oa_hsw.c +++ b/drivers/gpu/drm/i915/i915_oa_hsw.c | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #include <linux/sysfs.h> | 10 | #include <linux/sysfs.h> |
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/i915_oa_hsw.h index 74d03439c157..3d0c870cd0bd 100644 --- a/drivers/gpu/drm/i915/i915_oa_hsw.h +++ b/drivers/gpu/drm/i915/i915_oa_hsw.h | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #ifndef __I915_OA_HSW_H__ | 10 | #ifndef __I915_OA_HSW_H__ |
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.c b/drivers/gpu/drm/i915/i915_oa_icl.c index a5667926e3de..baa51427a543 100644 --- a/drivers/gpu/drm/i915/i915_oa_icl.c +++ b/drivers/gpu/drm/i915/i915_oa_icl.c | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #include <linux/sysfs.h> | 10 | #include <linux/sysfs.h> |
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.h b/drivers/gpu/drm/i915/i915_oa_icl.h index ae1c24aafe4f..24eaa97d61ba 100644 --- a/drivers/gpu/drm/i915/i915_oa_icl.h +++ b/drivers/gpu/drm/i915/i915_oa_icl.h | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #ifndef __I915_OA_ICL_H__ | 10 | #ifndef __I915_OA_ICL_H__ |
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/i915_oa_kblgt2.c index 2fa98a40bbc8..168e49ab0d4d 100644 --- a/drivers/gpu/drm/i915/i915_oa_kblgt2.c +++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.c | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #include <linux/sysfs.h> | 10 | #include <linux/sysfs.h> |
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/i915_oa_kblgt2.h index 25b803546dc1..a55398a904de 100644 --- a/drivers/gpu/drm/i915/i915_oa_kblgt2.h +++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.h | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #ifndef __I915_OA_KBLGT2_H__ | 10 | #ifndef __I915_OA_KBLGT2_H__ |
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/i915_oa_kblgt3.c index f3cb6679a1bc..6ffa553c388e 100644 --- a/drivers/gpu/drm/i915/i915_oa_kblgt3.c +++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.c | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #include <linux/sysfs.h> | 10 | #include <linux/sysfs.h> |
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/i915_oa_kblgt3.h index d5b5b5c1923e..3ddd3483b7cc 100644 --- a/drivers/gpu/drm/i915/i915_oa_kblgt3.h +++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.h | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #ifndef __I915_OA_KBLGT3_H__ | 10 | #ifndef __I915_OA_KBLGT3_H__ |
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/i915_oa_sklgt2.c index bf8b8cd8a50d..7ce6ee851d43 100644 --- a/drivers/gpu/drm/i915/i915_oa_sklgt2.c +++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.c | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #include <linux/sysfs.h> | 10 | #include <linux/sysfs.h> |
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/i915_oa_sklgt2.h index fe1aa2c03958..be6256037239 100644 --- a/drivers/gpu/drm/i915/i915_oa_sklgt2.h +++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.h | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #ifndef __I915_OA_SKLGT2_H__ | 10 | #ifndef __I915_OA_SKLGT2_H__ |
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/i915_oa_sklgt3.c index ae534c7c8135..086ca2631e1c 100644 --- a/drivers/gpu/drm/i915/i915_oa_sklgt3.c +++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.c | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #include <linux/sysfs.h> | 10 | #include <linux/sysfs.h> |
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/i915_oa_sklgt3.h index 06746b2616c8..650beb068e56 100644 --- a/drivers/gpu/drm/i915/i915_oa_sklgt3.h +++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.h | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #ifndef __I915_OA_SKLGT3_H__ | 10 | #ifndef __I915_OA_SKLGT3_H__ |
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/i915_oa_sklgt4.c index 817fba2d82df..b291a6eb8a87 100644 --- a/drivers/gpu/drm/i915/i915_oa_sklgt4.c +++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.c | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #include <linux/sysfs.h> | 10 | #include <linux/sysfs.h> |
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/i915_oa_sklgt4.h index 944fd525c8b1..8dcf849d131e 100644 --- a/drivers/gpu/drm/i915/i915_oa_sklgt4.h +++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.h | |||
@@ -1,29 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | 2 | * SPDX-License-Identifier: MIT |
3 | * DO NOT EDIT manually! | ||
4 | * | ||
5 | * | ||
6 | * Copyright (c) 2015 Intel Corporation | ||
7 | * | 3 | * |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Copyright © 2018 Intel Corporation |
9 | * copy of this software and associated documentation files (the "Software"), | ||
10 | * to deal in the Software without restriction, including without limitation | ||
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
12 | * and/or sell copies of the Software, and to permit persons to whom the | ||
13 | * Software is furnished to do so, subject to the following conditions: | ||
14 | * | ||
15 | * The above copyright notice and this permission notice (including the next | ||
16 | * paragraph) shall be included in all copies or substantial portions of the | ||
17 | * Software. | ||
18 | * | ||
19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
21 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
22 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
23 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
24 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
25 | * IN THE SOFTWARE. | ||
26 | * | 5 | * |
6 | * Autogenerated file by GPU Top : https://github.com/rib/gputop | ||
7 | * DO NOT EDIT manually! | ||
27 | */ | 8 | */ |
28 | 9 | ||
29 | #ifndef __I915_OA_SKLGT4_H__ | 10 | #ifndef __I915_OA_SKLGT4_H__ |
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 295e981e4a39..2e0356561839 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c | |||
@@ -82,10 +82,6 @@ i915_param_named_unsafe(enable_hangcheck, bool, 0644, | |||
82 | "WARNING: Disabling this can cause system wide hangs. " | 82 | "WARNING: Disabling this can cause system wide hangs. " |
83 | "(default: true)"); | 83 | "(default: true)"); |
84 | 84 | ||
85 | i915_param_named_unsafe(enable_ppgtt, int, 0400, | ||
86 | "Override PPGTT usage. " | ||
87 | "(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)"); | ||
88 | |||
89 | i915_param_named_unsafe(enable_psr, int, 0600, | 85 | i915_param_named_unsafe(enable_psr, int, 0600, |
90 | "Enable PSR " | 86 | "Enable PSR " |
91 | "(0=disabled, 1=enabled) " | 87 | "(0=disabled, 1=enabled) " |
@@ -171,8 +167,10 @@ i915_param_named_unsafe(inject_load_failure, uint, 0400, | |||
171 | i915_param_named(enable_dpcd_backlight, bool, 0600, | 167 | i915_param_named(enable_dpcd_backlight, bool, 0600, |
172 | "Enable support for DPCD backlight control (default:false)"); | 168 | "Enable support for DPCD backlight control (default:false)"); |
173 | 169 | ||
170 | #if IS_ENABLED(CONFIG_DRM_I915_GVT) | ||
174 | i915_param_named(enable_gvt, bool, 0400, | 171 | i915_param_named(enable_gvt, bool, 0400, |
175 | "Enable support for Intel GVT-g graphics virtualization host support(default:false)"); | 172 | "Enable support for Intel GVT-g graphics virtualization host support(default:false)"); |
173 | #endif | ||
176 | 174 | ||
177 | static __always_inline void _print_param(struct drm_printer *p, | 175 | static __always_inline void _print_param(struct drm_printer *p, |
178 | const char *name, | 176 | const char *name, |
@@ -188,7 +186,8 @@ static __always_inline void _print_param(struct drm_printer *p, | |||
188 | else if (!__builtin_strcmp(type, "char *")) | 186 | else if (!__builtin_strcmp(type, "char *")) |
189 | drm_printf(p, "i915.%s=%s\n", name, *(const char **)x); | 187 | drm_printf(p, "i915.%s=%s\n", name, *(const char **)x); |
190 | else | 188 | else |
191 | BUILD_BUG(); | 189 | WARN_ONCE(1, "no printer defined for param type %s (i915.%s)\n", |
190 | type, name); | ||
192 | } | 191 | } |
193 | 192 | ||
194 | /** | 193 | /** |
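Note: the i915_params.c hunk above swaps the compile-time BUILD_BUG() in _print_param() for a runtime WARN_ONCE when a parameter's stringified type has no printer. A minimal userspace sketch of that dispatch-on-type-string pattern (names and the demo parameter are illustrative; the kernel folds the strcmp chain at compile time via __builtin_strcmp and __always_inline):

#include <stdio.h>
#include <string.h>

static void print_param(const char *type, const char *name, const void *x)
{
	if (!strcmp(type, "bool"))
		printf("i915.%s=%s\n", name, *(const _Bool *)x ? "yes" : "no");
	else if (!strcmp(type, "int"))
		printf("i915.%s=%d\n", name, *(const int *)x);
	else if (!strcmp(type, "unsigned int"))
		printf("i915.%s=%u\n", name, *(const unsigned int *)x);
	else
		/* the hunk above now warns once here instead of BUILD_BUG() */
		fprintf(stderr, "no printer defined for param type %s (i915.%s)\n",
			type, name);
}

int main(void)
{
	int enable_psr = -1;

	print_param("int", "enable_psr", &enable_psr);
	return 0;
}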
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 6c4d4a21474b..7e56c516c815 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h | |||
@@ -41,7 +41,6 @@ struct drm_printer; | |||
41 | param(int, vbt_sdvo_panel_type, -1) \ | 41 | param(int, vbt_sdvo_panel_type, -1) \ |
42 | param(int, enable_dc, -1) \ | 42 | param(int, enable_dc, -1) \ |
43 | param(int, enable_fbc, -1) \ | 43 | param(int, enable_fbc, -1) \ |
44 | param(int, enable_ppgtt, -1) \ | ||
45 | param(int, enable_psr, -1) \ | 44 | param(int, enable_psr, -1) \ |
46 | param(int, disable_power_well, -1) \ | 45 | param(int, disable_power_well, -1) \ |
47 | param(int, enable_ips, 1) \ | 46 | param(int, enable_ips, 1) \ |
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index d6f7b9fe1d26..1b81d7cb209e 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c | |||
@@ -33,19 +33,30 @@ | |||
33 | #define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1) | 33 | #define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1) |
34 | 34 | ||
35 | #define GEN_DEFAULT_PIPEOFFSETS \ | 35 | #define GEN_DEFAULT_PIPEOFFSETS \ |
36 | .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ | 36 | .pipe_offsets = { \ |
37 | PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \ | 37 | [TRANSCODER_A] = PIPE_A_OFFSET, \ |
38 | .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ | 38 | [TRANSCODER_B] = PIPE_B_OFFSET, \ |
39 | TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \ | 39 | [TRANSCODER_C] = PIPE_C_OFFSET, \ |
40 | .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET } | 40 | [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \ |
41 | }, \ | ||
42 | .trans_offsets = { \ | ||
43 | [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ | ||
44 | [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ | ||
45 | [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ | ||
46 | [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \ | ||
47 | } | ||
41 | 48 | ||
42 | #define GEN_CHV_PIPEOFFSETS \ | 49 | #define GEN_CHV_PIPEOFFSETS \ |
43 | .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ | 50 | .pipe_offsets = { \ |
44 | CHV_PIPE_C_OFFSET }, \ | 51 | [TRANSCODER_A] = PIPE_A_OFFSET, \ |
45 | .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ | 52 | [TRANSCODER_B] = PIPE_B_OFFSET, \ |
46 | CHV_TRANSCODER_C_OFFSET, }, \ | 53 | [TRANSCODER_C] = CHV_PIPE_C_OFFSET, \ |
47 | .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \ | 54 | }, \ |
48 | CHV_PALETTE_C_OFFSET } | 55 | .trans_offsets = { \ |
56 | [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ | ||
57 | [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ | ||
58 | [TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \ | ||
59 | } | ||
49 | 60 | ||
50 | #define CURSOR_OFFSETS \ | 61 | #define CURSOR_OFFSETS \ |
51 | .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET } | 62 | .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET } |
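Note: the PIPEOFFSETS macros above move from positional initializers to designated ones, so each array slot is keyed by its transcoder enum and platforms that lack a transcoder simply leave that slot zero. A standalone sketch of the pattern (enum shape mirrors the diff; the PIPE_A/B/C base values are assumed for illustration):

#include <stdio.h>

enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C,
		  TRANSCODER_EDP, I915_MAX_TRANSCODERS };

static const unsigned int pipe_offsets[I915_MAX_TRANSCODERS] = {
	[TRANSCODER_A]   = 0x70000,	/* assumed PIPE_A_OFFSET */
	[TRANSCODER_B]   = 0x71000,
	[TRANSCODER_C]   = 0x72000,
	[TRANSCODER_EDP] = 0x7f000,	/* PIPE_EDP_OFFSET per this diff */
};

int main(void)
{
	/* slots not named in the initializer read back as 0 */
	printf("eDP pipe offset: 0x%x\n", pipe_offsets[TRANSCODER_EDP]);
	return 0;
}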
@@ -252,7 +263,7 @@ static const struct intel_device_info intel_ironlake_m_info = { | |||
252 | .has_llc = 1, \ | 263 | .has_llc = 1, \ |
253 | .has_rc6 = 1, \ | 264 | .has_rc6 = 1, \ |
254 | .has_rc6p = 1, \ | 265 | .has_rc6p = 1, \ |
255 | .has_aliasing_ppgtt = 1, \ | 266 | .ppgtt = INTEL_PPGTT_ALIASING, \ |
256 | GEN_DEFAULT_PIPEOFFSETS, \ | 267 | GEN_DEFAULT_PIPEOFFSETS, \ |
257 | GEN_DEFAULT_PAGE_SIZES, \ | 268 | GEN_DEFAULT_PAGE_SIZES, \ |
258 | CURSOR_OFFSETS | 269 | CURSOR_OFFSETS |
@@ -297,8 +308,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = { | |||
297 | .has_llc = 1, \ | 308 | .has_llc = 1, \ |
298 | .has_rc6 = 1, \ | 309 | .has_rc6 = 1, \ |
299 | .has_rc6p = 1, \ | 310 | .has_rc6p = 1, \ |
300 | .has_aliasing_ppgtt = 1, \ | 311 | .ppgtt = INTEL_PPGTT_FULL, \ |
301 | .has_full_ppgtt = 1, \ | ||
302 | GEN_DEFAULT_PIPEOFFSETS, \ | 312 | GEN_DEFAULT_PIPEOFFSETS, \ |
303 | GEN_DEFAULT_PAGE_SIZES, \ | 313 | GEN_DEFAULT_PAGE_SIZES, \ |
304 | IVB_CURSOR_OFFSETS | 314 | IVB_CURSOR_OFFSETS |
@@ -351,8 +361,7 @@ static const struct intel_device_info intel_valleyview_info = { | |||
351 | .has_rc6 = 1, | 361 | .has_rc6 = 1, |
352 | .has_gmch_display = 1, | 362 | .has_gmch_display = 1, |
353 | .has_hotplug = 1, | 363 | .has_hotplug = 1, |
354 | .has_aliasing_ppgtt = 1, | 364 | .ppgtt = INTEL_PPGTT_FULL, |
355 | .has_full_ppgtt = 1, | ||
356 | .has_snoop = true, | 365 | .has_snoop = true, |
357 | .has_coherent_ggtt = false, | 366 | .has_coherent_ggtt = false, |
358 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING, | 367 | .ring_mask = RENDER_RING | BSD_RING | BLT_RING, |
@@ -399,7 +408,7 @@ static const struct intel_device_info intel_haswell_gt3_info = { | |||
399 | .page_sizes = I915_GTT_PAGE_SIZE_4K | \ | 408 | .page_sizes = I915_GTT_PAGE_SIZE_4K | \ |
400 | I915_GTT_PAGE_SIZE_2M, \ | 409 | I915_GTT_PAGE_SIZE_2M, \ |
401 | .has_logical_ring_contexts = 1, \ | 410 | .has_logical_ring_contexts = 1, \ |
402 | .has_full_48bit_ppgtt = 1, \ | 411 | .ppgtt = INTEL_PPGTT_FULL_4LVL, \ |
403 | .has_64bit_reloc = 1, \ | 412 | .has_64bit_reloc = 1, \ |
404 | .has_reset_engine = 1 | 413 | .has_reset_engine = 1 |
405 | 414 | ||
@@ -443,8 +452,7 @@ static const struct intel_device_info intel_cherryview_info = { | |||
443 | .has_rc6 = 1, | 452 | .has_rc6 = 1, |
444 | .has_logical_ring_contexts = 1, | 453 | .has_logical_ring_contexts = 1, |
445 | .has_gmch_display = 1, | 454 | .has_gmch_display = 1, |
446 | .has_aliasing_ppgtt = 1, | 455 | .ppgtt = INTEL_PPGTT_FULL, |
447 | .has_full_ppgtt = 1, | ||
448 | .has_reset_engine = 1, | 456 | .has_reset_engine = 1, |
449 | .has_snoop = true, | 457 | .has_snoop = true, |
450 | .has_coherent_ggtt = false, | 458 | .has_coherent_ggtt = false, |
@@ -472,6 +480,8 @@ static const struct intel_device_info intel_cherryview_info = { | |||
472 | 480 | ||
473 | #define SKL_PLATFORM \ | 481 | #define SKL_PLATFORM \ |
474 | GEN9_FEATURES, \ | 482 | GEN9_FEATURES, \ |
483 | /* Display WA #0477 WaDisableIPC: skl */ \ | ||
484 | .has_ipc = 0, \ | ||
475 | PLATFORM(INTEL_SKYLAKE) | 485 | PLATFORM(INTEL_SKYLAKE) |
476 | 486 | ||
477 | static const struct intel_device_info intel_skylake_gt1_info = { | 487 | static const struct intel_device_info intel_skylake_gt1_info = { |
@@ -518,9 +528,7 @@ static const struct intel_device_info intel_skylake_gt4_info = { | |||
518 | .has_logical_ring_contexts = 1, \ | 528 | .has_logical_ring_contexts = 1, \ |
519 | .has_logical_ring_preemption = 1, \ | 529 | .has_logical_ring_preemption = 1, \ |
520 | .has_guc = 1, \ | 530 | .has_guc = 1, \ |
521 | .has_aliasing_ppgtt = 1, \ | 531 | .ppgtt = INTEL_PPGTT_FULL_4LVL, \ |
522 | .has_full_ppgtt = 1, \ | ||
523 | .has_full_48bit_ppgtt = 1, \ | ||
524 | .has_reset_engine = 1, \ | 532 | .has_reset_engine = 1, \ |
525 | .has_snoop = true, \ | 533 | .has_snoop = true, \ |
526 | .has_coherent_ggtt = false, \ | 534 | .has_coherent_ggtt = false, \ |
@@ -598,6 +606,22 @@ static const struct intel_device_info intel_cannonlake_info = { | |||
598 | 606 | ||
599 | #define GEN11_FEATURES \ | 607 | #define GEN11_FEATURES \ |
600 | GEN10_FEATURES, \ | 608 | GEN10_FEATURES, \ |
609 | .pipe_offsets = { \ | ||
610 | [TRANSCODER_A] = PIPE_A_OFFSET, \ | ||
611 | [TRANSCODER_B] = PIPE_B_OFFSET, \ | ||
612 | [TRANSCODER_C] = PIPE_C_OFFSET, \ | ||
613 | [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \ | ||
614 | [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \ | ||
615 | [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \ | ||
616 | }, \ | ||
617 | .trans_offsets = { \ | ||
618 | [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ | ||
619 | [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ | ||
620 | [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ | ||
621 | [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \ | ||
622 | [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ | ||
623 | [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ | ||
624 | }, \ | ||
601 | GEN(11), \ | 625 | GEN(11), \ |
602 | .ddb_size = 2048, \ | 626 | .ddb_size = 2048, \ |
603 | .has_logical_ring_elsq = 1 | 627 | .has_logical_ring_elsq = 1 |
@@ -663,7 +687,7 @@ static const struct pci_device_id pciidlist[] = { | |||
663 | INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info), | 687 | INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info), |
664 | INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), | 688 | INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), |
665 | INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), | 689 | INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), |
666 | INTEL_AML_GT2_IDS(&intel_kabylake_gt2_info), | 690 | INTEL_AML_KBL_GT2_IDS(&intel_kabylake_gt2_info), |
667 | INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info), | 691 | INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info), |
668 | INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info), | 692 | INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info), |
669 | INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info), | 693 | INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info), |
@@ -671,6 +695,7 @@ static const struct pci_device_id pciidlist[] = { | |||
671 | INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info), | 695 | INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info), |
672 | INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info), | 696 | INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info), |
673 | INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info), | 697 | INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info), |
698 | INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info), | ||
674 | INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info), | 699 | INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info), |
675 | INTEL_CNL_IDS(&intel_cannonlake_info), | 700 | INTEL_CNL_IDS(&intel_cannonlake_info), |
676 | INTEL_ICL_11_IDS(&intel_icelake_11_info), | 701 | INTEL_ICL_11_IDS(&intel_icelake_11_info), |
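Note: throughout i915_pci.c the three has_aliasing_ppgtt/has_full_ppgtt/has_full_48bit_ppgtt flags collapse into a single ranked .ppgtt field, so capability checks become ordered comparisons. A sketch under the assumption that the enum is ordered NONE < ALIASING < FULL < FULL_4LVL (the exact kernel definition may differ):

#include <stdio.h>

enum intel_ppgtt {
	INTEL_PPGTT_NONE = 0,
	INTEL_PPGTT_ALIASING,
	INTEL_PPGTT_FULL,
	INTEL_PPGTT_FULL_4LVL,
};

struct intel_device_info {
	enum intel_ppgtt ppgtt;
};

/* replaces a separate has_full_ppgtt flag test */
static int has_full_ppgtt(const struct intel_device_info *info)
{
	return info->ppgtt >= INTEL_PPGTT_FULL;
}

int main(void)
{
	struct intel_device_info gen9 = { .ppgtt = INTEL_PPGTT_FULL_4LVL };

	printf("full ppgtt: %s\n", has_full_ppgtt(&gen9) ? "yes" : "no");
	return 0;
}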
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 664b96bb65a3..4529edfdcfc8 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c | |||
@@ -890,8 +890,8 @@ static int gen8_oa_read(struct i915_perf_stream *stream, | |||
890 | DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n", | 890 | DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n", |
891 | dev_priv->perf.oa.period_exponent); | 891 | dev_priv->perf.oa.period_exponent); |
892 | 892 | ||
893 | dev_priv->perf.oa.ops.oa_disable(dev_priv); | 893 | dev_priv->perf.oa.ops.oa_disable(stream); |
894 | dev_priv->perf.oa.ops.oa_enable(dev_priv); | 894 | dev_priv->perf.oa.ops.oa_enable(stream); |
895 | 895 | ||
896 | /* | 896 | /* |
897 | * Note: .oa_enable() is expected to re-init the oabuffer and | 897 | * Note: .oa_enable() is expected to re-init the oabuffer and |
@@ -1114,8 +1114,8 @@ static int gen7_oa_read(struct i915_perf_stream *stream, | |||
1114 | DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n", | 1114 | DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n", |
1115 | dev_priv->perf.oa.period_exponent); | 1115 | dev_priv->perf.oa.period_exponent); |
1116 | 1116 | ||
1117 | dev_priv->perf.oa.ops.oa_disable(dev_priv); | 1117 | dev_priv->perf.oa.ops.oa_disable(stream); |
1118 | dev_priv->perf.oa.ops.oa_enable(dev_priv); | 1118 | dev_priv->perf.oa.ops.oa_enable(stream); |
1119 | 1119 | ||
1120 | oastatus1 = I915_READ(GEN7_OASTATUS1); | 1120 | oastatus1 = I915_READ(GEN7_OASTATUS1); |
1121 | } | 1121 | } |
@@ -1528,8 +1528,6 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv) | |||
1528 | goto err_unpin; | 1528 | goto err_unpin; |
1529 | } | 1529 | } |
1530 | 1530 | ||
1531 | dev_priv->perf.oa.ops.init_oa_buffer(dev_priv); | ||
1532 | |||
1533 | DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n", | 1531 | DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n", |
1534 | i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma), | 1532 | i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma), |
1535 | dev_priv->perf.oa.oa_buffer.vaddr); | 1533 | dev_priv->perf.oa.oa_buffer.vaddr); |
@@ -1563,9 +1561,11 @@ static void config_oa_regs(struct drm_i915_private *dev_priv, | |||
1563 | } | 1561 | } |
1564 | } | 1562 | } |
1565 | 1563 | ||
1566 | static int hsw_enable_metric_set(struct drm_i915_private *dev_priv, | 1564 | static int hsw_enable_metric_set(struct i915_perf_stream *stream) |
1567 | const struct i915_oa_config *oa_config) | ||
1568 | { | 1565 | { |
1566 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
1567 | const struct i915_oa_config *oa_config = stream->oa_config; | ||
1568 | |||
1569 | /* PRM: | 1569 | /* PRM: |
1570 | * | 1570 | * |
1571 | * OA unit is using “crclk” for its functionality. When trunk | 1571 | * OA unit is using “crclk” for its functionality. When trunk |
@@ -1767,9 +1767,10 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, | |||
1767 | return 0; | 1767 | return 0; |
1768 | } | 1768 | } |
1769 | 1769 | ||
1770 | static int gen8_enable_metric_set(struct drm_i915_private *dev_priv, | 1770 | static int gen8_enable_metric_set(struct i915_perf_stream *stream) |
1771 | const struct i915_oa_config *oa_config) | ||
1772 | { | 1771 | { |
1772 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
1773 | const struct i915_oa_config *oa_config = stream->oa_config; | ||
1773 | int ret; | 1774 | int ret; |
1774 | 1775 | ||
1775 | /* | 1776 | /* |
@@ -1837,10 +1838,10 @@ static void gen10_disable_metric_set(struct drm_i915_private *dev_priv) | |||
1837 | I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE); | 1838 | I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE); |
1838 | } | 1839 | } |
1839 | 1840 | ||
1840 | static void gen7_oa_enable(struct drm_i915_private *dev_priv) | 1841 | static void gen7_oa_enable(struct i915_perf_stream *stream) |
1841 | { | 1842 | { |
1842 | struct i915_gem_context *ctx = | 1843 | struct drm_i915_private *dev_priv = stream->dev_priv; |
1843 | dev_priv->perf.oa.exclusive_stream->ctx; | 1844 | struct i915_gem_context *ctx = stream->ctx; |
1844 | u32 ctx_id = dev_priv->perf.oa.specific_ctx_id; | 1845 | u32 ctx_id = dev_priv->perf.oa.specific_ctx_id; |
1845 | bool periodic = dev_priv->perf.oa.periodic; | 1846 | bool periodic = dev_priv->perf.oa.periodic; |
1846 | u32 period_exponent = dev_priv->perf.oa.period_exponent; | 1847 | u32 period_exponent = dev_priv->perf.oa.period_exponent; |
@@ -1867,8 +1868,9 @@ static void gen7_oa_enable(struct drm_i915_private *dev_priv) | |||
1867 | GEN7_OACONTROL_ENABLE); | 1868 | GEN7_OACONTROL_ENABLE); |
1868 | } | 1869 | } |
1869 | 1870 | ||
1870 | static void gen8_oa_enable(struct drm_i915_private *dev_priv) | 1871 | static void gen8_oa_enable(struct i915_perf_stream *stream) |
1871 | { | 1872 | { |
1873 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
1872 | u32 report_format = dev_priv->perf.oa.oa_buffer.format; | 1874 | u32 report_format = dev_priv->perf.oa.oa_buffer.format; |
1873 | 1875 | ||
1874 | /* | 1876 | /* |
@@ -1905,7 +1907,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream) | |||
1905 | { | 1907 | { |
1906 | struct drm_i915_private *dev_priv = stream->dev_priv; | 1908 | struct drm_i915_private *dev_priv = stream->dev_priv; |
1907 | 1909 | ||
1908 | dev_priv->perf.oa.ops.oa_enable(dev_priv); | 1910 | dev_priv->perf.oa.ops.oa_enable(stream); |
1909 | 1911 | ||
1910 | if (dev_priv->perf.oa.periodic) | 1912 | if (dev_priv->perf.oa.periodic) |
1911 | hrtimer_start(&dev_priv->perf.oa.poll_check_timer, | 1913 | hrtimer_start(&dev_priv->perf.oa.poll_check_timer, |
@@ -1913,8 +1915,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream) | |||
1913 | HRTIMER_MODE_REL_PINNED); | 1915 | HRTIMER_MODE_REL_PINNED); |
1914 | } | 1916 | } |
1915 | 1917 | ||
1916 | static void gen7_oa_disable(struct drm_i915_private *dev_priv) | 1918 | static void gen7_oa_disable(struct i915_perf_stream *stream) |
1917 | { | 1919 | { |
1920 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
1921 | |||
1918 | I915_WRITE(GEN7_OACONTROL, 0); | 1922 | I915_WRITE(GEN7_OACONTROL, 0); |
1919 | if (intel_wait_for_register(dev_priv, | 1923 | if (intel_wait_for_register(dev_priv, |
1920 | GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0, | 1924 | GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0, |
@@ -1922,8 +1926,10 @@ static void gen7_oa_disable(struct drm_i915_private *dev_priv) | |||
1922 | DRM_ERROR("wait for OA to be disabled timed out\n"); | 1926 | DRM_ERROR("wait for OA to be disabled timed out\n"); |
1923 | } | 1927 | } |
1924 | 1928 | ||
1925 | static void gen8_oa_disable(struct drm_i915_private *dev_priv) | 1929 | static void gen8_oa_disable(struct i915_perf_stream *stream) |
1926 | { | 1930 | { |
1931 | struct drm_i915_private *dev_priv = stream->dev_priv; | ||
1932 | |||
1927 | I915_WRITE(GEN8_OACONTROL, 0); | 1933 | I915_WRITE(GEN8_OACONTROL, 0); |
1928 | if (intel_wait_for_register(dev_priv, | 1934 | if (intel_wait_for_register(dev_priv, |
1929 | GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0, | 1935 | GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0, |
@@ -1943,7 +1949,7 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream) | |||
1943 | { | 1949 | { |
1944 | struct drm_i915_private *dev_priv = stream->dev_priv; | 1950 | struct drm_i915_private *dev_priv = stream->dev_priv; |
1945 | 1951 | ||
1946 | dev_priv->perf.oa.ops.oa_disable(dev_priv); | 1952 | dev_priv->perf.oa.ops.oa_disable(stream); |
1947 | 1953 | ||
1948 | if (dev_priv->perf.oa.periodic) | 1954 | if (dev_priv->perf.oa.periodic) |
1949 | hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer); | 1955 | hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer); |
@@ -1998,7 +2004,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, | |||
1998 | return -EINVAL; | 2004 | return -EINVAL; |
1999 | } | 2005 | } |
2000 | 2006 | ||
2001 | if (!dev_priv->perf.oa.ops.init_oa_buffer) { | 2007 | if (!dev_priv->perf.oa.ops.enable_metric_set) { |
2002 | DRM_DEBUG("OA unit not supported\n"); | 2008 | DRM_DEBUG("OA unit not supported\n"); |
2003 | return -ENODEV; | 2009 | return -ENODEV; |
2004 | } | 2010 | } |
@@ -2092,8 +2098,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, | |||
2092 | if (ret) | 2098 | if (ret) |
2093 | goto err_lock; | 2099 | goto err_lock; |
2094 | 2100 | ||
2095 | ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv, | 2101 | ret = dev_priv->perf.oa.ops.enable_metric_set(stream); |
2096 | stream->oa_config); | ||
2097 | if (ret) { | 2102 | if (ret) { |
2098 | DRM_DEBUG("Unable to enable metric set\n"); | 2103 | DRM_DEBUG("Unable to enable metric set\n"); |
2099 | goto err_enable; | 2104 | goto err_enable; |
@@ -3387,7 +3392,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv) | |||
3387 | dev_priv->perf.oa.ops.is_valid_mux_reg = | 3392 | dev_priv->perf.oa.ops.is_valid_mux_reg = |
3388 | hsw_is_valid_mux_addr; | 3393 | hsw_is_valid_mux_addr; |
3389 | dev_priv->perf.oa.ops.is_valid_flex_reg = NULL; | 3394 | dev_priv->perf.oa.ops.is_valid_flex_reg = NULL; |
3390 | dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer; | ||
3391 | dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set; | 3395 | dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set; |
3392 | dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set; | 3396 | dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set; |
3393 | dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable; | 3397 | dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable; |
@@ -3406,7 +3410,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv) | |||
3406 | */ | 3410 | */ |
3407 | dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats; | 3411 | dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats; |
3408 | 3412 | ||
3409 | dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer; | ||
3410 | dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable; | 3413 | dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable; |
3411 | dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable; | 3414 | dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable; |
3412 | dev_priv->perf.oa.ops.read = gen8_oa_read; | 3415 | dev_priv->perf.oa.ops.read = gen8_oa_read; |
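Note: the i915_perf.c hunks rework the OA vtable so every hook takes the perf stream and derives dev_priv (and the stream's ctx/oa_config) from it, rather than fishing the exclusive stream back out of dev_priv. A compilable sketch of the new shape (struct layouts here are stand-ins, not the kernel's):

struct drm_i915_private;

struct i915_perf_stream {
	struct drm_i915_private *dev_priv;
	/* ... ctx, oa_config, ... */
};

struct i915_oa_ops {
	/* was: void (*oa_enable)(struct drm_i915_private *); */
	void (*oa_enable)(struct i915_perf_stream *stream);
	void (*oa_disable)(struct i915_perf_stream *stream);
	int  (*enable_metric_set)(struct i915_perf_stream *stream);
};

static void gen8_oa_enable(struct i915_perf_stream *stream)
{
	/* device now derived from the stream, not passed in */
	struct drm_i915_private *dev_priv = stream->dev_priv;

	(void)dev_priv;
}

int main(void)
{
	struct i915_oa_ops ops = { .oa_enable = gen8_oa_enable };
	struct i915_perf_stream stream = { 0 };

	ops.oa_enable(&stream);
	return 0;
}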
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index 3f502eef2431..6fc4b8eeab42 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c | |||
@@ -27,8 +27,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv, | |||
27 | 27 | ||
28 | slice_length = sizeof(sseu->slice_mask); | 28 | slice_length = sizeof(sseu->slice_mask); |
29 | subslice_length = sseu->max_slices * | 29 | subslice_length = sseu->max_slices * |
30 | DIV_ROUND_UP(sseu->max_subslices, | 30 | DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE); |
31 | sizeof(sseu->subslice_mask[0]) * BITS_PER_BYTE); | ||
32 | eu_length = sseu->max_slices * sseu->max_subslices * | 31 | eu_length = sseu->max_slices * sseu->max_subslices * |
33 | DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); | 32 | DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE); |
34 | 33 | ||
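Note: the i915_query.c fix sizes the subslice mask as whole bytes per slice, one bit per subslice rounded up, matching the eu mask computation below it. A worked check of the corrected arithmetic:

#include <stdio.h>

#define BITS_PER_BYTE		8
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int max_slices = 1, max_subslices = 8;

	/* 8 subslices fit one byte; 9 would round up to 2 */
	printf("subslice_length = %d\n",
	       max_slices * DIV_ROUND_UP(max_subslices, BITS_PER_BYTE));
	return 0;
}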
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7c491ea3d052..47baf2fe8f71 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -157,20 +157,37 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) | |||
157 | /* | 157 | /* |
158 | * Named helper wrappers around _PICK_EVEN() and _PICK(). | 158 | * Named helper wrappers around _PICK_EVEN() and _PICK(). |
159 | */ | 159 | */ |
160 | #define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b) | 160 | #define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b) |
161 | #define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b)) | 161 | #define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b) |
162 | #define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b) | 162 | #define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b) |
163 | #define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b) | 163 | #define _PORT(port, a, b) _PICK_EVEN(port, a, b) |
164 | #define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b) | 164 | #define _PLL(pll, a, b) _PICK_EVEN(pll, a, b) |
165 | #define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b)) | 165 | |
166 | #define _PORT(port, a, b) _PICK_EVEN(port, a, b) | 166 | #define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b)) |
167 | #define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b)) | 167 | #define _MMIO_PLANE(plane, a, b) _MMIO(_PLANE(plane, a, b)) |
168 | #define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) | 168 | #define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b)) |
169 | #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) | 169 | #define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b)) |
170 | #define _PLL(pll, a, b) _PICK_EVEN(pll, a, b) | 170 | #define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b)) |
171 | #define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b)) | 171 | |
172 | #define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__) | 172 | #define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__) |
173 | #define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c)) | 173 | |
174 | #define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) | ||
175 | #define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) | ||
176 | #define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c)) | ||
177 | |||
178 | /* | ||
179 | * Device info offset array based helpers for groups of registers with unevenly | ||
180 | * spaced base offsets. | ||
181 | */ | ||
182 | #define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \ | ||
183 | dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \ | ||
184 | dev_priv->info.display_mmio_offset) | ||
185 | #define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \ | ||
186 | dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \ | ||
187 | dev_priv->info.display_mmio_offset) | ||
188 | #define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \ | ||
189 | dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \ | ||
190 | dev_priv->info.display_mmio_offset) | ||
174 | 191 | ||
175 | #define __MASKED_FIELD(mask, value) ((mask) << 16 | (value)) | 192 | #define __MASKED_FIELD(mask, value) ((mask) << 16 | (value)) |
176 | #define _MASKED_FIELD(mask, value) ({ \ | 193 | #define _MASKED_FIELD(mask, value) ({ \ |
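Note: the relocated _MMIO_PIPE2()/_MMIO_TRANS2()/_CURSOR2() helpers above rebase a register specified at its pipe-A/transcoder-A address onto any other pipe via the per-device offset table. A standalone sketch (the offsets match the TRANSCODER_*_OFFSET values later in this diff; the 0x60000 register is an assumed transcoder-A address):

#include <stdio.h>

enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, MAX_TRANS };

static const unsigned int trans_offsets[MAX_TRANS] = {
	[TRANSCODER_A] = 0x60000,
	[TRANSCODER_B] = 0x61000,
	[TRANSCODER_C] = 0x62000,
};

static unsigned int mmio_trans2(enum transcoder t, unsigned int reg_a,
				unsigned int display_mmio_offset)
{
	return trans_offsets[t] - trans_offsets[TRANSCODER_A] + reg_a +
	       display_mmio_offset;
}

int main(void)
{
	/* a register at 0x60000 on transcoder A lands at 0x61000 on B */
	printf("reg(B) = 0x%x\n", mmio_trans2(TRANSCODER_B, 0x60000, 0));
	return 0;
}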
@@ -1631,35 +1648,6 @@ enum i915_power_well_id { | |||
1631 | #define PHY_RESERVED (1 << 7) | 1648 | #define PHY_RESERVED (1 << 7) |
1632 | #define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC) | 1649 | #define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC) |
1633 | 1650 | ||
1634 | #define CNL_PORT_CL1CM_DW5 _MMIO(0x162014) | ||
1635 | #define CL_POWER_DOWN_ENABLE (1 << 4) | ||
1636 | #define SUS_CLOCK_CONFIG (3 << 0) | ||
1637 | |||
1638 | #define _ICL_PORT_CL_DW5_A 0x162014 | ||
1639 | #define _ICL_PORT_CL_DW5_B 0x6C014 | ||
1640 | #define ICL_PORT_CL_DW5(port) _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \ | ||
1641 | _ICL_PORT_CL_DW5_B) | ||
1642 | |||
1643 | #define _CNL_PORT_CL_DW10_A 0x162028 | ||
1644 | #define _ICL_PORT_CL_DW10_B 0x6c028 | ||
1645 | #define ICL_PORT_CL_DW10(port) _MMIO_PORT(port, \ | ||
1646 | _CNL_PORT_CL_DW10_A, \ | ||
1647 | _ICL_PORT_CL_DW10_B) | ||
1648 | #define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25) | ||
1649 | #define PG_SEQ_DELAY_OVERRIDE_SHIFT 25 | ||
1650 | #define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24) | ||
1651 | #define PWR_UP_ALL_LANES (0x0 << 4) | ||
1652 | #define PWR_DOWN_LN_3_2_1 (0xe << 4) | ||
1653 | #define PWR_DOWN_LN_3_2 (0xc << 4) | ||
1654 | #define PWR_DOWN_LN_3 (0x8 << 4) | ||
1655 | #define PWR_DOWN_LN_2_1_0 (0x7 << 4) | ||
1656 | #define PWR_DOWN_LN_1_0 (0x3 << 4) | ||
1657 | #define PWR_DOWN_LN_1 (0x2 << 4) | ||
1658 | #define PWR_DOWN_LN_3_1 (0xa << 4) | ||
1659 | #define PWR_DOWN_LN_3_1_0 (0xb << 4) | ||
1660 | #define PWR_DOWN_LN_MASK (0xf << 4) | ||
1661 | #define PWR_DOWN_LN_SHIFT 4 | ||
1662 | |||
1663 | #define _PORT_CL1CM_DW9_A 0x162024 | 1651 | #define _PORT_CL1CM_DW9_A 0x162024 |
1664 | #define _PORT_CL1CM_DW9_BC 0x6C024 | 1652 | #define _PORT_CL1CM_DW9_BC 0x6C024 |
1665 | #define IREF0RC_OFFSET_SHIFT 8 | 1653 | #define IREF0RC_OFFSET_SHIFT 8 |
@@ -1672,13 +1660,6 @@ enum i915_power_well_id { | |||
1672 | #define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT) | 1660 | #define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT) |
1673 | #define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC) | 1661 | #define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC) |
1674 | 1662 | ||
1675 | #define _ICL_PORT_CL_DW12_A 0x162030 | ||
1676 | #define _ICL_PORT_CL_DW12_B 0x6C030 | ||
1677 | #define ICL_LANE_ENABLE_AUX (1 << 0) | ||
1678 | #define ICL_PORT_CL_DW12(port) _MMIO_PORT((port), \ | ||
1679 | _ICL_PORT_CL_DW12_A, \ | ||
1680 | _ICL_PORT_CL_DW12_B) | ||
1681 | |||
1682 | #define _PORT_CL1CM_DW28_A 0x162070 | 1663 | #define _PORT_CL1CM_DW28_A 0x162070 |
1683 | #define _PORT_CL1CM_DW28_BC 0x6C070 | 1664 | #define _PORT_CL1CM_DW28_BC 0x6C070 |
1684 | #define OCL1_POWER_DOWN_EN (1 << 23) | 1665 | #define OCL1_POWER_DOWN_EN (1 << 23) |
@@ -1691,6 +1672,74 @@ enum i915_power_well_id { | |||
1691 | #define OCL2_LDOFUSE_PWR_DIS (1 << 6) | 1672 | #define OCL2_LDOFUSE_PWR_DIS (1 << 6) |
1692 | #define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC) | 1673 | #define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC) |
1693 | 1674 | ||
1675 | /* | ||
1676 | * CNL/ICL Port/COMBO-PHY Registers | ||
1677 | */ | ||
1678 | #define _ICL_COMBOPHY_A 0x162000 | ||
1679 | #define _ICL_COMBOPHY_B 0x6C000 | ||
1680 | #define _ICL_COMBOPHY(port) _PICK(port, _ICL_COMBOPHY_A, \ | ||
1681 | _ICL_COMBOPHY_B) | ||
1682 | |||
1683 | /* CNL/ICL Port CL_DW registers */ | ||
1684 | #define _ICL_PORT_CL_DW(dw, port) (_ICL_COMBOPHY(port) + \ | ||
1685 | 4 * (dw)) | ||
1686 | |||
1687 | #define CNL_PORT_CL1CM_DW5 _MMIO(0x162014) | ||
1688 | #define ICL_PORT_CL_DW5(port) _MMIO(_ICL_PORT_CL_DW(5, port)) | ||
1689 | #define CL_POWER_DOWN_ENABLE (1 << 4) | ||
1690 | #define SUS_CLOCK_CONFIG (3 << 0) | ||
1691 | |||
1692 | #define ICL_PORT_CL_DW10(port) _MMIO(_ICL_PORT_CL_DW(10, port)) | ||
1693 | #define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25) | ||
1694 | #define PG_SEQ_DELAY_OVERRIDE_SHIFT 25 | ||
1695 | #define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24) | ||
1696 | #define PWR_UP_ALL_LANES (0x0 << 4) | ||
1697 | #define PWR_DOWN_LN_3_2_1 (0xe << 4) | ||
1698 | #define PWR_DOWN_LN_3_2 (0xc << 4) | ||
1699 | #define PWR_DOWN_LN_3 (0x8 << 4) | ||
1700 | #define PWR_DOWN_LN_2_1_0 (0x7 << 4) | ||
1701 | #define PWR_DOWN_LN_1_0 (0x3 << 4) | ||
1702 | #define PWR_DOWN_LN_1 (0x2 << 4) | ||
1703 | #define PWR_DOWN_LN_3_1 (0xa << 4) | ||
1704 | #define PWR_DOWN_LN_3_1_0 (0xb << 4) | ||
1705 | #define PWR_DOWN_LN_MASK (0xf << 4) | ||
1706 | #define PWR_DOWN_LN_SHIFT 4 | ||
1707 | |||
1708 | #define ICL_PORT_CL_DW12(port) _MMIO(_ICL_PORT_CL_DW(12, port)) | ||
1709 | #define ICL_LANE_ENABLE_AUX (1 << 0) | ||
1710 | |||
1711 | /* CNL/ICL Port COMP_DW registers */ | ||
1712 | #define _ICL_PORT_COMP 0x100 | ||
1713 | #define _ICL_PORT_COMP_DW(dw, port) (_ICL_COMBOPHY(port) + \ | ||
1714 | _ICL_PORT_COMP + 4 * (dw)) | ||
1715 | |||
1716 | #define CNL_PORT_COMP_DW0 _MMIO(0x162100) | ||
1717 | #define ICL_PORT_COMP_DW0(port) _MMIO(_ICL_PORT_COMP_DW(0, port)) | ||
1718 | #define COMP_INIT (1 << 31) | ||
1719 | |||
1720 | #define CNL_PORT_COMP_DW1 _MMIO(0x162104) | ||
1721 | #define ICL_PORT_COMP_DW1(port) _MMIO(_ICL_PORT_COMP_DW(1, port)) | ||
1722 | |||
1723 | #define CNL_PORT_COMP_DW3 _MMIO(0x16210c) | ||
1724 | #define ICL_PORT_COMP_DW3(port) _MMIO(_ICL_PORT_COMP_DW(3, port)) | ||
1725 | #define PROCESS_INFO_DOT_0 (0 << 26) | ||
1726 | #define PROCESS_INFO_DOT_1 (1 << 26) | ||
1727 | #define PROCESS_INFO_DOT_4 (2 << 26) | ||
1728 | #define PROCESS_INFO_MASK (7 << 26) | ||
1729 | #define PROCESS_INFO_SHIFT 26 | ||
1730 | #define VOLTAGE_INFO_0_85V (0 << 24) | ||
1731 | #define VOLTAGE_INFO_0_95V (1 << 24) | ||
1732 | #define VOLTAGE_INFO_1_05V (2 << 24) | ||
1733 | #define VOLTAGE_INFO_MASK (3 << 24) | ||
1734 | #define VOLTAGE_INFO_SHIFT 24 | ||
1735 | |||
1736 | #define CNL_PORT_COMP_DW9 _MMIO(0x162124) | ||
1737 | #define ICL_PORT_COMP_DW9(port) _MMIO(_ICL_PORT_COMP_DW(9, port)) | ||
1738 | |||
1739 | #define CNL_PORT_COMP_DW10 _MMIO(0x162128) | ||
1740 | #define ICL_PORT_COMP_DW10(port) _MMIO(_ICL_PORT_COMP_DW(10, port)) | ||
1741 | |||
1742 | /* CNL/ICL Port PCS registers */ | ||
1694 | #define _CNL_PORT_PCS_DW1_GRP_AE 0x162304 | 1743 | #define _CNL_PORT_PCS_DW1_GRP_AE 0x162304 |
1695 | #define _CNL_PORT_PCS_DW1_GRP_B 0x162384 | 1744 | #define _CNL_PORT_PCS_DW1_GRP_B 0x162384 |
1696 | #define _CNL_PORT_PCS_DW1_GRP_C 0x162B04 | 1745 | #define _CNL_PORT_PCS_DW1_GRP_C 0x162B04 |
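Note: the combo-PHY rework above replaces one hand-written #define per port/dword pair with a computed address: PHY base + block offset + 4 * dword index. A quick check that the scheme reproduces an existing register from this diff:

#include <stdio.h>

#define ICL_COMBOPHY_A	0x162000
#define ICL_PORT_COMP	0x100	/* COMP_DW block offset within the PHY */

static unsigned int icl_port_comp_dw(int dw, unsigned int phy_base)
{
	return phy_base + ICL_PORT_COMP + 4 * dw;
}

int main(void)
{
	/* DW3 on PHY A -> 0x16210c, matching the old CNL_PORT_COMP_DW3 */
	printf("COMP_DW3(A) = 0x%x\n", icl_port_comp_dw(3, ICL_COMBOPHY_A));
	return 0;
}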
@@ -1708,7 +1757,6 @@ enum i915_power_well_id { | |||
1708 | _CNL_PORT_PCS_DW1_GRP_D, \ | 1757 | _CNL_PORT_PCS_DW1_GRP_D, \ |
1709 | _CNL_PORT_PCS_DW1_GRP_AE, \ | 1758 | _CNL_PORT_PCS_DW1_GRP_AE, \ |
1710 | _CNL_PORT_PCS_DW1_GRP_F)) | 1759 | _CNL_PORT_PCS_DW1_GRP_F)) |
1711 | |||
1712 | #define CNL_PORT_PCS_DW1_LN0(port) _MMIO(_PICK(port, \ | 1760 | #define CNL_PORT_PCS_DW1_LN0(port) _MMIO(_PICK(port, \ |
1713 | _CNL_PORT_PCS_DW1_LN0_AE, \ | 1761 | _CNL_PORT_PCS_DW1_LN0_AE, \ |
1714 | _CNL_PORT_PCS_DW1_LN0_B, \ | 1762 | _CNL_PORT_PCS_DW1_LN0_B, \ |
@@ -1717,24 +1765,21 @@ enum i915_power_well_id { | |||
1717 | _CNL_PORT_PCS_DW1_LN0_AE, \ | 1765 | _CNL_PORT_PCS_DW1_LN0_AE, \ |
1718 | _CNL_PORT_PCS_DW1_LN0_F)) | 1766 | _CNL_PORT_PCS_DW1_LN0_F)) |
1719 | 1767 | ||
1720 | #define _ICL_PORT_PCS_DW1_GRP_A 0x162604 | 1768 | #define _ICL_PORT_PCS_AUX 0x300 |
1721 | #define _ICL_PORT_PCS_DW1_GRP_B 0x6C604 | 1769 | #define _ICL_PORT_PCS_GRP 0x600 |
1722 | #define _ICL_PORT_PCS_DW1_LN0_A 0x162804 | 1770 | #define _ICL_PORT_PCS_LN(ln) (0x800 + (ln) * 0x100) |
1723 | #define _ICL_PORT_PCS_DW1_LN0_B 0x6C804 | 1771 | #define _ICL_PORT_PCS_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \ |
1724 | #define _ICL_PORT_PCS_DW1_AUX_A 0x162304 | 1772 | _ICL_PORT_PCS_AUX + 4 * (dw)) |
1725 | #define _ICL_PORT_PCS_DW1_AUX_B 0x6c304 | 1773 | #define _ICL_PORT_PCS_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \ |
1726 | #define ICL_PORT_PCS_DW1_GRP(port) _MMIO_PORT(port,\ | 1774 | _ICL_PORT_PCS_GRP + 4 * (dw)) |
1727 | _ICL_PORT_PCS_DW1_GRP_A, \ | 1775 | #define _ICL_PORT_PCS_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \ |
1728 | _ICL_PORT_PCS_DW1_GRP_B) | 1776 | _ICL_PORT_PCS_LN(ln) + 4 * (dw)) |
1729 | #define ICL_PORT_PCS_DW1_LN0(port) _MMIO_PORT(port, \ | 1777 | #define ICL_PORT_PCS_DW1_AUX(port) _MMIO(_ICL_PORT_PCS_DW_AUX(1, port)) |
1730 | _ICL_PORT_PCS_DW1_LN0_A, \ | 1778 | #define ICL_PORT_PCS_DW1_GRP(port) _MMIO(_ICL_PORT_PCS_DW_GRP(1, port)) |
1731 | _ICL_PORT_PCS_DW1_LN0_B) | 1779 | #define ICL_PORT_PCS_DW1_LN0(port) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, port)) |
1732 | #define ICL_PORT_PCS_DW1_AUX(port) _MMIO_PORT(port, \ | ||
1733 | _ICL_PORT_PCS_DW1_AUX_A, \ | ||
1734 | _ICL_PORT_PCS_DW1_AUX_B) | ||
1735 | #define COMMON_KEEPER_EN (1 << 26) | 1780 | #define COMMON_KEEPER_EN (1 << 26) |
1736 | 1781 | ||
1737 | /* CNL Port TX registers */ | 1782 | /* CNL/ICL Port TX registers */ |
1738 | #define _CNL_PORT_TX_AE_GRP_OFFSET 0x162340 | 1783 | #define _CNL_PORT_TX_AE_GRP_OFFSET 0x162340 |
1739 | #define _CNL_PORT_TX_B_GRP_OFFSET 0x1623C0 | 1784 | #define _CNL_PORT_TX_B_GRP_OFFSET 0x1623C0 |
1740 | #define _CNL_PORT_TX_C_GRP_OFFSET 0x162B40 | 1785 | #define _CNL_PORT_TX_C_GRP_OFFSET 0x162B40 |
@@ -1762,23 +1807,22 @@ enum i915_power_well_id { | |||
1762 | _CNL_PORT_TX_F_LN0_OFFSET) + \ | 1807 | _CNL_PORT_TX_F_LN0_OFFSET) + \ |
1763 | 4 * (dw)) | 1808 | 4 * (dw)) |
1764 | 1809 | ||
1765 | #define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 2)) | 1810 | #define _ICL_PORT_TX_AUX 0x380 |
1766 | #define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 2)) | 1811 | #define _ICL_PORT_TX_GRP 0x680 |
1767 | #define _ICL_PORT_TX_DW2_GRP_A 0x162688 | 1812 | #define _ICL_PORT_TX_LN(ln) (0x880 + (ln) * 0x100) |
1768 | #define _ICL_PORT_TX_DW2_GRP_B 0x6C688 | 1813 | |
1769 | #define _ICL_PORT_TX_DW2_LN0_A 0x162888 | 1814 | #define _ICL_PORT_TX_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \ |
1770 | #define _ICL_PORT_TX_DW2_LN0_B 0x6C888 | 1815 | _ICL_PORT_TX_AUX + 4 * (dw)) |
1771 | #define _ICL_PORT_TX_DW2_AUX_A 0x162388 | 1816 | #define _ICL_PORT_TX_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \ |
1772 | #define _ICL_PORT_TX_DW2_AUX_B 0x6c388 | 1817 | _ICL_PORT_TX_GRP + 4 * (dw)) |
1773 | #define ICL_PORT_TX_DW2_GRP(port) _MMIO_PORT(port, \ | 1818 | #define _ICL_PORT_TX_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \ |
1774 | _ICL_PORT_TX_DW2_GRP_A, \ | 1819 | _ICL_PORT_TX_LN(ln) + 4 * (dw)) |
1775 | _ICL_PORT_TX_DW2_GRP_B) | 1820 | |
1776 | #define ICL_PORT_TX_DW2_LN0(port) _MMIO_PORT(port, \ | 1821 | #define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(2, port)) |
1777 | _ICL_PORT_TX_DW2_LN0_A, \ | 1822 | #define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(2, port)) |
1778 | _ICL_PORT_TX_DW2_LN0_B) | 1823 | #define ICL_PORT_TX_DW2_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(2, port)) |
1779 | #define ICL_PORT_TX_DW2_AUX(port) _MMIO_PORT(port, \ | 1824 | #define ICL_PORT_TX_DW2_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(2, port)) |
1780 | _ICL_PORT_TX_DW2_AUX_A, \ | 1825 | #define ICL_PORT_TX_DW2_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(2, 0, port)) |
1781 | _ICL_PORT_TX_DW2_AUX_B) | ||
1782 | #define SWING_SEL_UPPER(x) (((x) >> 3) << 15) | 1826 | #define SWING_SEL_UPPER(x) (((x) >> 3) << 15) |
1783 | #define SWING_SEL_UPPER_MASK (1 << 15) | 1827 | #define SWING_SEL_UPPER_MASK (1 << 15) |
1784 | #define SWING_SEL_LOWER(x) (((x) & 0x7) << 11) | 1828 | #define SWING_SEL_LOWER(x) (((x) & 0x7) << 11) |
@@ -1795,24 +1839,10 @@ enum i915_power_well_id { | |||
1795 | #define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \ | 1839 | #define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \ |
1796 | ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ | 1840 | ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ |
1797 | _CNL_PORT_TX_DW4_LN0_AE))) | 1841 | _CNL_PORT_TX_DW4_LN0_AE))) |
1798 | #define _ICL_PORT_TX_DW4_GRP_A 0x162690 | 1842 | #define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port)) |
1799 | #define _ICL_PORT_TX_DW4_GRP_B 0x6C690 | 1843 | #define ICL_PORT_TX_DW4_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(4, port)) |
1800 | #define _ICL_PORT_TX_DW4_LN0_A 0x162890 | 1844 | #define ICL_PORT_TX_DW4_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, port)) |
1801 | #define _ICL_PORT_TX_DW4_LN1_A 0x162990 | 1845 | #define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port)) |
1802 | #define _ICL_PORT_TX_DW4_LN0_B 0x6C890 | ||
1803 | #define _ICL_PORT_TX_DW4_AUX_A 0x162390 | ||
1804 | #define _ICL_PORT_TX_DW4_AUX_B 0x6c390 | ||
1805 | #define ICL_PORT_TX_DW4_GRP(port) _MMIO_PORT(port, \ | ||
1806 | _ICL_PORT_TX_DW4_GRP_A, \ | ||
1807 | _ICL_PORT_TX_DW4_GRP_B) | ||
1808 | #define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_PORT(port, \ | ||
1809 | _ICL_PORT_TX_DW4_LN0_A, \ | ||
1810 | _ICL_PORT_TX_DW4_LN0_B) + \ | ||
1811 | ((ln) * (_ICL_PORT_TX_DW4_LN1_A - \ | ||
1812 | _ICL_PORT_TX_DW4_LN0_A))) | ||
1813 | #define ICL_PORT_TX_DW4_AUX(port) _MMIO_PORT(port, \ | ||
1814 | _ICL_PORT_TX_DW4_AUX_A, \ | ||
1815 | _ICL_PORT_TX_DW4_AUX_B) | ||
1816 | #define LOADGEN_SELECT (1 << 31) | 1846 | #define LOADGEN_SELECT (1 << 31) |
1817 | #define POST_CURSOR_1(x) ((x) << 12) | 1847 | #define POST_CURSOR_1(x) ((x) << 12) |
1818 | #define POST_CURSOR_1_MASK (0x3F << 12) | 1848 | #define POST_CURSOR_1_MASK (0x3F << 12) |
@@ -1821,23 +1851,11 @@ enum i915_power_well_id { | |||
1821 | #define CURSOR_COEFF(x) ((x) << 0) | 1851 | #define CURSOR_COEFF(x) ((x) << 0) |
1822 | #define CURSOR_COEFF_MASK (0x3F << 0) | 1852 | #define CURSOR_COEFF_MASK (0x3F << 0) |
1823 | 1853 | ||
1824 | #define CNL_PORT_TX_DW5_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 5)) | 1854 | #define CNL_PORT_TX_DW5_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(5, port)) |
1825 | #define CNL_PORT_TX_DW5_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 5)) | 1855 | #define CNL_PORT_TX_DW5_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(5, port)) |
1826 | #define _ICL_PORT_TX_DW5_GRP_A 0x162694 | 1856 | #define ICL_PORT_TX_DW5_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(5, port)) |
1827 | #define _ICL_PORT_TX_DW5_GRP_B 0x6C694 | 1857 | #define ICL_PORT_TX_DW5_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(5, port)) |
1828 | #define _ICL_PORT_TX_DW5_LN0_A 0x162894 | 1858 | #define ICL_PORT_TX_DW5_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(5, 0, port)) |
1829 | #define _ICL_PORT_TX_DW5_LN0_B 0x6C894 | ||
1830 | #define _ICL_PORT_TX_DW5_AUX_A 0x162394 | ||
1831 | #define _ICL_PORT_TX_DW5_AUX_B 0x6c394 | ||
1832 | #define ICL_PORT_TX_DW5_GRP(port) _MMIO_PORT(port, \ | ||
1833 | _ICL_PORT_TX_DW5_GRP_A, \ | ||
1834 | _ICL_PORT_TX_DW5_GRP_B) | ||
1835 | #define ICL_PORT_TX_DW5_LN0(port) _MMIO_PORT(port, \ | ||
1836 | _ICL_PORT_TX_DW5_LN0_A, \ | ||
1837 | _ICL_PORT_TX_DW5_LN0_B) | ||
1838 | #define ICL_PORT_TX_DW5_AUX(port) _MMIO_PORT(port, \ | ||
1839 | _ICL_PORT_TX_DW5_AUX_A, \ | ||
1840 | _ICL_PORT_TX_DW5_AUX_B) | ||
1841 | #define TX_TRAINING_EN (1 << 31) | 1859 | #define TX_TRAINING_EN (1 << 31) |
1842 | #define TAP2_DISABLE (1 << 30) | 1860 | #define TAP2_DISABLE (1 << 30) |
1843 | #define TAP3_DISABLE (1 << 29) | 1861 | #define TAP3_DISABLE (1 << 29) |
@@ -2054,49 +2072,16 @@ enum i915_power_well_id { | |||
2054 | #define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC) | 2072 | #define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC) |
2055 | #define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28) | 2073 | #define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28) |
2056 | 2074 | ||
2057 | #define CNL_PORT_COMP_DW0 _MMIO(0x162100) | 2075 | #define FIA1_BASE 0x163000 |
2058 | #define COMP_INIT (1 << 31) | ||
2059 | #define CNL_PORT_COMP_DW1 _MMIO(0x162104) | ||
2060 | #define CNL_PORT_COMP_DW3 _MMIO(0x16210c) | ||
2061 | #define PROCESS_INFO_DOT_0 (0 << 26) | ||
2062 | #define PROCESS_INFO_DOT_1 (1 << 26) | ||
2063 | #define PROCESS_INFO_DOT_4 (2 << 26) | ||
2064 | #define PROCESS_INFO_MASK (7 << 26) | ||
2065 | #define PROCESS_INFO_SHIFT 26 | ||
2066 | #define VOLTAGE_INFO_0_85V (0 << 24) | ||
2067 | #define VOLTAGE_INFO_0_95V (1 << 24) | ||
2068 | #define VOLTAGE_INFO_1_05V (2 << 24) | ||
2069 | #define VOLTAGE_INFO_MASK (3 << 24) | ||
2070 | #define VOLTAGE_INFO_SHIFT 24 | ||
2071 | #define CNL_PORT_COMP_DW9 _MMIO(0x162124) | ||
2072 | #define CNL_PORT_COMP_DW10 _MMIO(0x162128) | ||
2073 | |||
2074 | #define _ICL_PORT_COMP_DW0_A 0x162100 | ||
2075 | #define _ICL_PORT_COMP_DW0_B 0x6C100 | ||
2076 | #define ICL_PORT_COMP_DW0(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW0_A, \ | ||
2077 | _ICL_PORT_COMP_DW0_B) | ||
2078 | #define _ICL_PORT_COMP_DW1_A 0x162104 | ||
2079 | #define _ICL_PORT_COMP_DW1_B 0x6C104 | ||
2080 | #define ICL_PORT_COMP_DW1(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW1_A, \ | ||
2081 | _ICL_PORT_COMP_DW1_B) | ||
2082 | #define _ICL_PORT_COMP_DW3_A 0x16210C | ||
2083 | #define _ICL_PORT_COMP_DW3_B 0x6C10C | ||
2084 | #define ICL_PORT_COMP_DW3(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW3_A, \ | ||
2085 | _ICL_PORT_COMP_DW3_B) | ||
2086 | #define _ICL_PORT_COMP_DW9_A 0x162124 | ||
2087 | #define _ICL_PORT_COMP_DW9_B 0x6C124 | ||
2088 | #define ICL_PORT_COMP_DW9(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW9_A, \ | ||
2089 | _ICL_PORT_COMP_DW9_B) | ||
2090 | #define _ICL_PORT_COMP_DW10_A 0x162128 | ||
2091 | #define _ICL_PORT_COMP_DW10_B 0x6C128 | ||
2092 | #define ICL_PORT_COMP_DW10(port) _MMIO_PORT(port, \ | ||
2093 | _ICL_PORT_COMP_DW10_A, \ | ||
2094 | _ICL_PORT_COMP_DW10_B) | ||
2095 | 2076 | ||
2096 | /* ICL PHY DFLEX registers */ | 2077 | /* ICL PHY DFLEX registers */ |
2097 | #define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0) | 2078 | #define PORT_TX_DFLEXDPMLE1 _MMIO(FIA1_BASE + 0x008C0) |
2098 | #define DFLEXDPMLE1_DPMLETC_MASK(n) (0xf << (4 * (n))) | 2079 | #define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port))) |
2099 | #define DFLEXDPMLE1_DPMLETC(n, x) ((x) << (4 * (n))) | 2080 | #define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port))) |
2081 | #define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port))) | ||
2082 | #define DFLEXDPMLE1_DPMLETC_ML3(tc_port) (8 << (4 * (tc_port))) | ||
2083 | #define DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) (12 << (4 * (tc_port))) | ||
2084 | #define DFLEXDPMLE1_DPMLETC_ML3_0(tc_port) (15 << (4 * (tc_port))) | ||
2100 | 2085 | ||
2101 | /* BXT PHY Ref registers */ | 2086 | /* BXT PHY Ref registers */ |
2102 | #define _PORT_REF_DW3_A 0x16218C | 2087 | #define _PORT_REF_DW3_A 0x16218C |
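Note: the DFLEXDPMLE1 hunk above rebases the register on FIA1_BASE and replaces the generic DFLEXDPMLE1_DPMLETC(n, x) with named main-link-lane values; each Type-C port owns a 4-bit field. A sketch of updating one port's field (macro layout mirrors the diff; the read-modify-write flow is illustrative):

#include <stdio.h>

#define DFLEXDPMLE1_DPMLETC_MASK(tc_port)	(0xfu << (4 * (tc_port)))
#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port)	(3u << (4 * (tc_port)))

int main(void)
{
	unsigned int val = 0;
	int tc_port = 1;

	/* enable main-link lanes 1:0 on TC port 1 -> bits 5:4 */
	val = (val & ~DFLEXDPMLE1_DPMLETC_MASK(tc_port)) |
	      DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
	printf("DFLEXDPMLE1 = 0x%08x\n", val);	/* 0x00000030 */
	return 0;
}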
@@ -2413,6 +2398,7 @@ enum i915_power_well_id { | |||
2413 | 2398 | ||
2414 | #define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080) | 2399 | #define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080) |
2415 | #define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF | 2400 | #define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF |
2401 | #define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7) | ||
2416 | 2402 | ||
2417 | #define GAMT_CHKN_BIT_REG _MMIO(0x4ab8) | 2403 | #define GAMT_CHKN_BIT_REG _MMIO(0x4ab8) |
2418 | #define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31) | 2404 | #define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31) |
@@ -2573,6 +2559,7 @@ enum i915_power_well_id { | |||
2573 | /* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */ | 2559 | /* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */ |
2574 | #define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4) | 2560 | #define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4) |
2575 | #define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2) | 2561 | #define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2) |
2562 | #define GEN11_ENABLE_32_PLANE_MODE (1 << 7) | ||
2576 | 2563 | ||
2577 | /* WaClearTdlStateAckDirtyBits */ | 2564 | /* WaClearTdlStateAckDirtyBits */ |
2578 | #define GEN8_STATE_ACK _MMIO(0x20F0) | 2565 | #define GEN8_STATE_ACK _MMIO(0x20F0) |
@@ -3475,11 +3462,13 @@ enum i915_power_well_id { | |||
3475 | /* | 3462 | /* |
3476 | * Palette regs | 3463 | * Palette regs |
3477 | */ | 3464 | */ |
3478 | #define PALETTE_A_OFFSET 0xa000 | 3465 | #define _PALETTE_A 0xa000 |
3479 | #define PALETTE_B_OFFSET 0xa800 | 3466 | #define _PALETTE_B 0xa800 |
3480 | #define CHV_PALETTE_C_OFFSET 0xc000 | 3467 | #define _CHV_PALETTE_C 0xc000 |
3481 | #define PALETTE(pipe, i) _MMIO(dev_priv->info.palette_offsets[pipe] + \ | 3468 | #define PALETTE(pipe, i) _MMIO(dev_priv->info.display_mmio_offset + \ |
3482 | dev_priv->info.display_mmio_offset + (i) * 4) | 3469 | _PICK((pipe), _PALETTE_A, \ |
3470 | _PALETTE_B, _CHV_PALETTE_C) + \ | ||
3471 | (i) * 4) | ||
3483 | 3472 | ||
3484 | /* MCH MMIO space */ | 3473 | /* MCH MMIO space */ |
3485 | 3474 | ||
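Note: PALETTE() above drops the per-device palette_offsets[] table in favour of picking the base directly from the pipe. A sketch of the _PICK() idiom it relies on, under the assumption that _PICK() indexes a compound-literal array of its arguments:

#include <stdio.h>

#define _PICK(__index, ...) (((const unsigned int []){ __VA_ARGS__ })[__index])

int main(void)
{
	/* pipe C on CHV picks the third base, 0xc000 */
	printf("palette base = 0x%x\n", _PICK(2, 0xa000, 0xa800, 0xc000));
	return 0;
}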
@@ -4061,15 +4050,27 @@ enum { | |||
4061 | #define _VSYNCSHIFT_B 0x61028 | 4050 | #define _VSYNCSHIFT_B 0x61028 |
4062 | #define _PIPE_MULT_B 0x6102c | 4051 | #define _PIPE_MULT_B 0x6102c |
4063 | 4052 | ||
4053 | /* DSI 0 timing regs */ | ||
4054 | #define _HTOTAL_DSI0 0x6b000 | ||
4055 | #define _HSYNC_DSI0 0x6b008 | ||
4056 | #define _VTOTAL_DSI0 0x6b00c | ||
4057 | #define _VSYNC_DSI0 0x6b014 | ||
4058 | #define _VSYNCSHIFT_DSI0 0x6b028 | ||
4059 | |||
4060 | /* DSI 1 timing regs */ | ||
4061 | #define _HTOTAL_DSI1 0x6b800 | ||
4062 | #define _HSYNC_DSI1 0x6b808 | ||
4063 | #define _VTOTAL_DSI1 0x6b80c | ||
4064 | #define _VSYNC_DSI1 0x6b814 | ||
4065 | #define _VSYNCSHIFT_DSI1 0x6b828 | ||
4066 | |||
4064 | #define TRANSCODER_A_OFFSET 0x60000 | 4067 | #define TRANSCODER_A_OFFSET 0x60000 |
4065 | #define TRANSCODER_B_OFFSET 0x61000 | 4068 | #define TRANSCODER_B_OFFSET 0x61000 |
4066 | #define TRANSCODER_C_OFFSET 0x62000 | 4069 | #define TRANSCODER_C_OFFSET 0x62000 |
4067 | #define CHV_TRANSCODER_C_OFFSET 0x63000 | 4070 | #define CHV_TRANSCODER_C_OFFSET 0x63000 |
4068 | #define TRANSCODER_EDP_OFFSET 0x6f000 | 4071 | #define TRANSCODER_EDP_OFFSET 0x6f000 |
4069 | 4072 | #define TRANSCODER_DSI0_OFFSET 0x6b000 | |
4070 | #define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \ | 4073 | #define TRANSCODER_DSI1_OFFSET 0x6b800 |
4071 | dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \ | ||
4072 | dev_priv->info.display_mmio_offset) | ||
4073 | 4074 | ||
4074 | #define HTOTAL(trans) _MMIO_TRANS2(trans, _HTOTAL_A) | 4075 | #define HTOTAL(trans) _MMIO_TRANS2(trans, _HTOTAL_A) |
4075 | #define HBLANK(trans) _MMIO_TRANS2(trans, _HBLANK_A) | 4076 | #define HBLANK(trans) _MMIO_TRANS2(trans, _HBLANK_A) |
@@ -4149,9 +4150,13 @@ enum { | |||
4149 | /* Bspec claims those aren't shifted but stay at 0x64800 */ | 4150 | /* Bspec claims those aren't shifted but stay at 0x64800 */ |
4150 | #define EDP_PSR_IMR _MMIO(0x64834) | 4151 | #define EDP_PSR_IMR _MMIO(0x64834) |
4151 | #define EDP_PSR_IIR _MMIO(0x64838) | 4152 | #define EDP_PSR_IIR _MMIO(0x64838) |
4152 | #define EDP_PSR_ERROR(trans) (1 << (((trans) * 8 + 10) & 31)) | 4153 | #define EDP_PSR_ERROR(shift) (1 << ((shift) + 2)) |
4153 | #define EDP_PSR_POST_EXIT(trans) (1 << (((trans) * 8 + 9) & 31)) | 4154 | #define EDP_PSR_POST_EXIT(shift) (1 << ((shift) + 1)) |
4154 | #define EDP_PSR_PRE_ENTRY(trans) (1 << (((trans) * 8 + 8) & 31)) | 4155 | #define EDP_PSR_PRE_ENTRY(shift) (1 << (shift)) |
4156 | #define EDP_PSR_TRANSCODER_C_SHIFT 24 | ||
4157 | #define EDP_PSR_TRANSCODER_B_SHIFT 16 | ||
4158 | #define EDP_PSR_TRANSCODER_A_SHIFT 8 | ||
4159 | #define EDP_PSR_TRANSCODER_EDP_SHIFT 0 | ||
4155 | 4160 | ||
4156 | #define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10) | 4161 | #define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10) |
4157 | #define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26) | 4162 | #define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26) |
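Note: the EDP_PSR IIR/IMR hunk above gives each transcoder an 8-bit field (shifts 0/8/16/24) and builds the pre-entry/post-exit/error bits from that shift, replacing the old (trans * 8 + n) & 31 arithmetic. A worked check (uses the GCC/Clang __builtin_ctz builtin):

#include <stdio.h>

#define EDP_PSR_TRANSCODER_A_SHIFT	8
#define EDP_PSR_ERROR(shift)		(1u << ((shift) + 2))
#define EDP_PSR_POST_EXIT(shift)	(1u << ((shift) + 1))
#define EDP_PSR_PRE_ENTRY(shift)	(1u << (shift))

int main(void)
{
	/* transcoder A: pre-entry bit 8, post-exit bit 9, error bit 10 */
	printf("error(A) = bit %d\n",
	       __builtin_ctz(EDP_PSR_ERROR(EDP_PSR_TRANSCODER_A_SHIFT)));
	return 0;
}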
@@ -4195,7 +4200,7 @@ enum { | |||
4195 | #define EDP_PSR_DEBUG_MASK_LPSP (1 << 27) | 4200 | #define EDP_PSR_DEBUG_MASK_LPSP (1 << 27) |
4196 | #define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26) | 4201 | #define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26) |
4197 | #define EDP_PSR_DEBUG_MASK_HPD (1 << 25) | 4202 | #define EDP_PSR_DEBUG_MASK_HPD (1 << 25) |
4198 | #define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) | 4203 | #define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */ |
4199 | #define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */ | 4204 | #define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */ |
4200 | 4205 | ||
4201 | #define EDP_PSR2_CTL _MMIO(0x6f900) | 4206 | #define EDP_PSR2_CTL _MMIO(0x6f900) |
@@ -4232,7 +4237,7 @@ enum { | |||
4232 | #define PSR_EVENT_FRONT_BUFFER_MODIFY (1 << 9) | 4237 | #define PSR_EVENT_FRONT_BUFFER_MODIFY (1 << 9) |
4233 | #define PSR_EVENT_WD_TIMER_EXPIRE (1 << 8) | 4238 | #define PSR_EVENT_WD_TIMER_EXPIRE (1 << 8) |
4234 | #define PSR_EVENT_PIPE_REGISTERS_UPDATE (1 << 6) | 4239 | #define PSR_EVENT_PIPE_REGISTERS_UPDATE (1 << 6) |
4235 | #define PSR_EVENT_REGISTER_UPDATE (1 << 5) | 4240 | #define PSR_EVENT_REGISTER_UPDATE (1 << 5) /* Reserved in ICL+ */ |
4236 | #define PSR_EVENT_HDCP_ENABLE (1 << 4) | 4241 | #define PSR_EVENT_HDCP_ENABLE (1 << 4) |
4237 | #define PSR_EVENT_KVMR_SESSION_ENABLE (1 << 3) | 4242 | #define PSR_EVENT_KVMR_SESSION_ENABLE (1 << 3) |
4238 | #define PSR_EVENT_VBI_ENABLE (1 << 2) | 4243 | #define PSR_EVENT_VBI_ENABLE (1 << 2) |
@@ -4584,6 +4589,15 @@ enum { | |||
4584 | #define VIDEO_DIP_FREQ_2VSYNC (2 << 16) | 4589 | #define VIDEO_DIP_FREQ_2VSYNC (2 << 16) |
4585 | #define VIDEO_DIP_FREQ_MASK (3 << 16) | 4590 | #define VIDEO_DIP_FREQ_MASK (3 << 16) |
4586 | /* HSW and later: */ | 4591 | /* HSW and later: */ |
4592 | #define DRM_DIP_ENABLE (1 << 28) | ||
4593 | #define PSR_VSC_BIT_7_SET (1 << 27) | ||
4594 | #define VSC_SELECT_MASK (0x3 << 25) | ||
4595 | #define VSC_SELECT_SHIFT 25 | ||
4596 | #define VSC_DIP_HW_HEA_DATA (0 << 25) | ||
4597 | #define VSC_DIP_HW_HEA_SW_DATA (1 << 25) | ||
4598 | #define VSC_DIP_HW_DATA_SW_HEA (2 << 25) | ||
4599 | #define VSC_DIP_SW_HEA_DATA (3 << 25) | ||
4600 | #define VDIP_ENABLE_PPS (1 << 24) | ||
4587 | #define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20) | 4601 | #define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20) |
4588 | #define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16) | 4602 | #define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16) |
4589 | #define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12) | 4603 | #define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12) |
@@ -4591,16 +4605,6 @@ enum { | |||
4591 | #define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4) | 4605 | #define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4) |
4592 | #define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0) | 4606 | #define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0) |
4593 | 4607 | ||
4594 | #define DRM_DIP_ENABLE (1 << 28) | ||
4595 | #define PSR_VSC_BIT_7_SET (1 << 27) | ||
4596 | #define VSC_SELECT_MASK (0x3 << 26) | ||
4597 | #define VSC_SELECT_SHIFT 26 | ||
4598 | #define VSC_DIP_HW_HEA_DATA (0 << 26) | ||
4599 | #define VSC_DIP_HW_HEA_SW_DATA (1 << 26) | ||
4600 | #define VSC_DIP_HW_DATA_SW_HEA (2 << 26) | ||
4601 | #define VSC_DIP_SW_HEA_DATA (3 << 26) | ||
4602 | #define VDIP_ENABLE_PPS (1 << 24) | ||
4603 | |||
4604 | /* Panel power sequencing */ | 4608 | /* Panel power sequencing */ |
4605 | #define PPS_BASE 0x61200 | 4609 | #define PPS_BASE 0x61200 |
4606 | #define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE) | 4610 | #define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE) |
@@ -5636,9 +5640,9 @@ enum { | |||
5636 | */ | 5640 | */ |
5637 | #define PIPE_EDP_OFFSET 0x7f000 | 5641 | #define PIPE_EDP_OFFSET 0x7f000 |
5638 | 5642 | ||
5639 | #define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \ | 5643 | /* ICL DSI 0 and 1 */ |
5640 | dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \ | 5644 | #define PIPE_DSI0_OFFSET 0x7b000 |
5641 | dev_priv->info.display_mmio_offset) | 5645 | #define PIPE_DSI1_OFFSET 0x7b800 |
5642 | 5646 | ||
5643 | #define PIPECONF(pipe) _MMIO_PIPE2(pipe, _PIPEACONF) | 5647 | #define PIPECONF(pipe) _MMIO_PIPE2(pipe, _PIPEACONF) |
5644 | #define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL) | 5648 | #define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL) |
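The _MMIO_PIPE2() definition removed in the hunk above rebases a pipe-A relative register onto another pipe via the per-pipe offset table (the surviving PIPECONF()/PIPEDSL() users suggest the macro is redefined elsewhere in this series). A standalone sketch of that arithmetic, with illustrative pipe bases; only PIPE_EDP matches the PIPE_EDP_OFFSET value shown above.

#include <stdint.h>
#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_EDP };

/* Illustrative per-pipe bases; PIPE_EDP matches PIPE_EDP_OFFSET above. */
static const uint32_t pipe_offsets[] = {
	[PIPE_A]   = 0x70000,
	[PIPE_B]   = 0x71000,
	[PIPE_C]   = 0x72000,
	[PIPE_EDP] = 0x7f000,
};

/* Rebase a pipe-A relative register onto the requested pipe. */
static uint32_t mmio_pipe2(enum pipe pipe, uint32_t reg)
{
	return pipe_offsets[pipe] - pipe_offsets[PIPE_A] + reg;
}

int main(void)
{
	uint32_t pipeaconf = 0x70008;	/* _PIPEACONF, for illustration */

	printf("PIPECONF(PIPE_B) = 0x%x\n", mmio_pipe2(PIPE_B, pipeaconf));
	return 0;
}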
@@ -6087,10 +6091,6 @@ enum { | |||
6087 | #define _CURBBASE_IVB 0x71084 | 6091 | #define _CURBBASE_IVB 0x71084 |
6088 | #define _CURBPOS_IVB 0x71088 | 6092 | #define _CURBPOS_IVB 0x71088 |
6089 | 6093 | ||
6090 | #define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \ | ||
6091 | dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \ | ||
6092 | dev_priv->info.display_mmio_offset) | ||
6093 | |||
6094 | #define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR) | 6094 | #define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR) |
6095 | #define CURBASE(pipe) _CURSOR2(pipe, _CURABASE) | 6095 | #define CURBASE(pipe) _CURSOR2(pipe, _CURABASE) |
6096 | #define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS) | 6096 | #define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS) |
@@ -6224,6 +6224,10 @@ enum { | |||
6224 | #define _DSPBOFFSET (dev_priv->info.display_mmio_offset + 0x711A4) | 6224 | #define _DSPBOFFSET (dev_priv->info.display_mmio_offset + 0x711A4) |
6225 | #define _DSPBSURFLIVE (dev_priv->info.display_mmio_offset + 0x711AC) | 6225 | #define _DSPBSURFLIVE (dev_priv->info.display_mmio_offset + 0x711AC) |
6226 | 6226 | ||
6227 | /* ICL DSI 0 and 1 */ | ||
6228 | #define _PIPEDSI0CONF 0x7b008 | ||
6229 | #define _PIPEDSI1CONF 0x7b808 | ||
6230 | |||
6227 | /* Sprite A control */ | 6231 | /* Sprite A control */ |
6228 | #define _DVSACNTR 0x72180 | 6232 | #define _DVSACNTR 0x72180 |
6229 | #define DVS_ENABLE (1 << 31) | 6233 | #define DVS_ENABLE (1 << 31) |
@@ -6511,6 +6515,7 @@ enum { | |||
6511 | #define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21) | 6515 | #define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21) |
6512 | #define PLANE_CTL_ORDER_BGRX (0 << 20) | 6516 | #define PLANE_CTL_ORDER_BGRX (0 << 20) |
6513 | #define PLANE_CTL_ORDER_RGBX (1 << 20) | 6517 | #define PLANE_CTL_ORDER_RGBX (1 << 20) |
6518 | #define PLANE_CTL_YUV420_Y_PLANE (1 << 19) | ||
6514 | #define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) | 6519 | #define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) |
6515 | #define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16) | 6520 | #define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16) |
6516 | #define PLANE_CTL_YUV422_YUYV (0 << 16) | 6521 | #define PLANE_CTL_YUV422_YUYV (0 << 16) |
@@ -6554,17 +6559,33 @@ enum { | |||
6554 | #define _PLANE_KEYVAL_2_A 0x70294 | 6559 | #define _PLANE_KEYVAL_2_A 0x70294 |
6555 | #define _PLANE_KEYMSK_1_A 0x70198 | 6560 | #define _PLANE_KEYMSK_1_A 0x70198 |
6556 | #define _PLANE_KEYMSK_2_A 0x70298 | 6561 | #define _PLANE_KEYMSK_2_A 0x70298 |
6562 | #define PLANE_KEYMSK_ALPHA_ENABLE (1 << 31) | ||
6557 | #define _PLANE_KEYMAX_1_A 0x701a0 | 6563 | #define _PLANE_KEYMAX_1_A 0x701a0 |
6558 | #define _PLANE_KEYMAX_2_A 0x702a0 | 6564 | #define _PLANE_KEYMAX_2_A 0x702a0 |
6565 | #define PLANE_KEYMAX_ALPHA(a) ((a) << 24) | ||
6559 | #define _PLANE_AUX_DIST_1_A 0x701c0 | 6566 | #define _PLANE_AUX_DIST_1_A 0x701c0 |
6560 | #define _PLANE_AUX_DIST_2_A 0x702c0 | 6567 | #define _PLANE_AUX_DIST_2_A 0x702c0 |
6561 | #define _PLANE_AUX_OFFSET_1_A 0x701c4 | 6568 | #define _PLANE_AUX_OFFSET_1_A 0x701c4 |
6562 | #define _PLANE_AUX_OFFSET_2_A 0x702c4 | 6569 | #define _PLANE_AUX_OFFSET_2_A 0x702c4 |
6570 | #define _PLANE_CUS_CTL_1_A 0x701c8 | ||
6571 | #define _PLANE_CUS_CTL_2_A 0x702c8 | ||
6572 | #define PLANE_CUS_ENABLE (1 << 31) | ||
6573 | #define PLANE_CUS_PLANE_6 (0 << 30) | ||
6574 | #define PLANE_CUS_PLANE_7 (1 << 30) | ||
6575 | #define PLANE_CUS_HPHASE_SIGN_NEGATIVE (1 << 19) | ||
6576 | #define PLANE_CUS_HPHASE_0 (0 << 16) | ||
6577 | #define PLANE_CUS_HPHASE_0_25 (1 << 16) | ||
6578 | #define PLANE_CUS_HPHASE_0_5 (2 << 16) | ||
6579 | #define PLANE_CUS_VPHASE_SIGN_NEGATIVE (1 << 15) | ||
6580 | #define PLANE_CUS_VPHASE_0 (0 << 12) | ||
6581 | #define PLANE_CUS_VPHASE_0_25 (1 << 12) | ||
6582 | #define PLANE_CUS_VPHASE_0_5 (2 << 12) | ||
6563 | #define _PLANE_COLOR_CTL_1_A 0x701CC /* GLK+ */ | 6583 | #define _PLANE_COLOR_CTL_1_A 0x701CC /* GLK+ */ |
6564 | #define _PLANE_COLOR_CTL_2_A 0x702CC /* GLK+ */ | 6584 | #define _PLANE_COLOR_CTL_2_A 0x702CC /* GLK+ */ |
6565 | #define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */ | 6585 | #define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */ |
6566 | #define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-ICL */ | 6586 | #define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-ICL */ |
6567 | #define PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE (1 << 28) | 6587 | #define PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE (1 << 28) |
6588 | #define PLANE_COLOR_INPUT_CSC_ENABLE (1 << 20) /* ICL+ */ | ||
6568 | #define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23) /* Pre-ICL */ | 6589 | #define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23) /* Pre-ICL */ |
6569 | #define PLANE_COLOR_CSC_MODE_BYPASS (0 << 17) | 6590 | #define PLANE_COLOR_CSC_MODE_BYPASS (0 << 17) |
6570 | #define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709 (1 << 17) | 6591 | #define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709 (1 << 17) |
@@ -6581,6 +6602,55 @@ enum { | |||
6581 | #define _PLANE_NV12_BUF_CFG_1_A 0x70278 | 6602 | #define _PLANE_NV12_BUF_CFG_1_A 0x70278 |
6582 | #define _PLANE_NV12_BUF_CFG_2_A 0x70378 | 6603 | #define _PLANE_NV12_BUF_CFG_2_A 0x70378 |
6583 | 6604 | ||
6605 | /* Input CSC Register Definitions */ | ||
6606 | #define _PLANE_INPUT_CSC_RY_GY_1_A 0x701E0 | ||
6607 | #define _PLANE_INPUT_CSC_RY_GY_2_A 0x702E0 | ||
6608 | |||
6609 | #define _PLANE_INPUT_CSC_RY_GY_1_B 0x711E0 | ||
6610 | #define _PLANE_INPUT_CSC_RY_GY_2_B 0x712E0 | ||
6611 | |||
6612 | #define _PLANE_INPUT_CSC_RY_GY_1(pipe) \ | ||
6613 | _PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_1_A, \ | ||
6614 | _PLANE_INPUT_CSC_RY_GY_1_B) | ||
6615 | #define _PLANE_INPUT_CSC_RY_GY_2(pipe) \ | ||
6616 | _PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_2_A, \ | ||
6617 | _PLANE_INPUT_CSC_RY_GY_2_B) | ||
6618 | |||
6619 | #define PLANE_INPUT_CSC_COEFF(pipe, plane, index) \ | ||
6620 | _MMIO_PLANE(plane, _PLANE_INPUT_CSC_RY_GY_1(pipe) + (index) * 4, \ | ||
6621 | _PLANE_INPUT_CSC_RY_GY_2(pipe) + (index) * 4) | ||
6622 | |||
6623 | #define _PLANE_INPUT_CSC_PREOFF_HI_1_A 0x701F8 | ||
6624 | #define _PLANE_INPUT_CSC_PREOFF_HI_2_A 0x702F8 | ||
6625 | |||
6626 | #define _PLANE_INPUT_CSC_PREOFF_HI_1_B 0x711F8 | ||
6627 | #define _PLANE_INPUT_CSC_PREOFF_HI_2_B 0x712F8 | ||
6628 | |||
6629 | #define _PLANE_INPUT_CSC_PREOFF_HI_1(pipe) \ | ||
6630 | _PIPE(pipe, _PLANE_INPUT_CSC_PREOFF_HI_1_A, \ | ||
6631 | _PLANE_INPUT_CSC_PREOFF_HI_1_B) | ||
6632 | #define _PLANE_INPUT_CSC_PREOFF_HI_2(pipe) \ | ||
6633 | _PIPE(pipe, _PLANE_INPUT_CSC_PREOFF_HI_2_A, \ | ||
6634 | _PLANE_INPUT_CSC_PREOFF_HI_2_B) | ||
6635 | #define PLANE_INPUT_CSC_PREOFF(pipe, plane, index) \ | ||
6636 | _MMIO_PLANE(plane, _PLANE_INPUT_CSC_PREOFF_HI_1(pipe) + (index) * 4, \ | ||
6637 | _PLANE_INPUT_CSC_PREOFF_HI_2(pipe) + (index) * 4) | ||
6638 | |||
6639 | #define _PLANE_INPUT_CSC_POSTOFF_HI_1_A 0x70204 | ||
6640 | #define _PLANE_INPUT_CSC_POSTOFF_HI_2_A 0x70304 | ||
6641 | |||
6642 | #define _PLANE_INPUT_CSC_POSTOFF_HI_1_B 0x71204 | ||
6643 | #define _PLANE_INPUT_CSC_POSTOFF_HI_2_B 0x71304 | ||
6644 | |||
6645 | #define _PLANE_INPUT_CSC_POSTOFF_HI_1(pipe) \ | ||
6646 | _PIPE(pipe, _PLANE_INPUT_CSC_POSTOFF_HI_1_A, \ | ||
6647 | _PLANE_INPUT_CSC_POSTOFF_HI_1_B) | ||
6648 | #define _PLANE_INPUT_CSC_POSTOFF_HI_2(pipe) \ | ||
6649 | _PIPE(pipe, _PLANE_INPUT_CSC_POSTOFF_HI_2_A, \ | ||
6650 | _PLANE_INPUT_CSC_POSTOFF_HI_2_B) | ||
6651 | #define PLANE_INPUT_CSC_POSTOFF(pipe, plane, index) \ | ||
6652 | _MMIO_PLANE(plane, _PLANE_INPUT_CSC_POSTOFF_HI_1(pipe) + (index) * 4, \ | ||
6653 | _PLANE_INPUT_CSC_POSTOFF_HI_2(pipe) + (index) * 4) | ||
6584 | 6654 | ||
6585 | #define _PLANE_CTL_1_B 0x71180 | 6655 | #define _PLANE_CTL_1_B 0x71180 |
6586 | #define _PLANE_CTL_2_B 0x71280 | 6656 | #define _PLANE_CTL_2_B 0x71280 |
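The PLANE_INPUT_CSC_* block above encodes a two-level lookup: pick the pipe-A or pipe-B register block, pick the plane-1 or plane-2 variant, then index coefficient words in 4-byte steps. A standalone sketch of the address arithmetic, with the base addresses copied from the diff (the pipe/plane encoding is simplified to two pipes and two planes):

#include <stdint.h>
#include <stdio.h>

/* Base addresses copied from the diff. */
#define CSC_RY_GY_1_A 0x701E0
#define CSC_RY_GY_2_A 0x702E0
#define CSC_RY_GY_1_B 0x711E0
#define CSC_RY_GY_2_B 0x712E0

/* pipe: 0 = A, 1 = B; plane: 0 = plane 1, 1 = plane 2; index: coeff word */
static uint32_t input_csc_coeff(int pipe, int plane, int index)
{
	uint32_t base1 = pipe ? CSC_RY_GY_1_B : CSC_RY_GY_1_A;
	uint32_t base2 = pipe ? CSC_RY_GY_2_B : CSC_RY_GY_2_A;

	return (plane ? base2 : base1) + index * 4;
}

int main(void)
{
	/* Coefficient word 2 of plane 1 on pipe B: 0x711e8. */
	printf("0x%x\n", input_csc_coeff(1, 0, 2));
	return 0;
}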
@@ -6697,6 +6767,15 @@ enum { | |||
6697 | #define PLANE_AUX_OFFSET(pipe, plane) \ | 6767 | #define PLANE_AUX_OFFSET(pipe, plane) \ |
6698 | _MMIO_PLANE(plane, _PLANE_AUX_OFFSET_1(pipe), _PLANE_AUX_OFFSET_2(pipe)) | 6768 | _MMIO_PLANE(plane, _PLANE_AUX_OFFSET_1(pipe), _PLANE_AUX_OFFSET_2(pipe)) |
6699 | 6769 | ||
6770 | #define _PLANE_CUS_CTL_1_B 0x711c8 | ||
6771 | #define _PLANE_CUS_CTL_2_B 0x712c8 | ||
6772 | #define _PLANE_CUS_CTL_1(pipe) \ | ||
6773 | _PIPE(pipe, _PLANE_CUS_CTL_1_A, _PLANE_CUS_CTL_1_B) | ||
6774 | #define _PLANE_CUS_CTL_2(pipe) \ | ||
6775 | _PIPE(pipe, _PLANE_CUS_CTL_2_A, _PLANE_CUS_CTL_2_B) | ||
6776 | #define PLANE_CUS_CTL(pipe, plane) \ | ||
6777 | _MMIO_PLANE(plane, _PLANE_CUS_CTL_1(pipe), _PLANE_CUS_CTL_2(pipe)) | ||
6778 | |||
6700 | #define _PLANE_COLOR_CTL_1_B 0x711CC | 6779 | #define _PLANE_COLOR_CTL_1_B 0x711CC |
6701 | #define _PLANE_COLOR_CTL_2_B 0x712CC | 6780 | #define _PLANE_COLOR_CTL_2_B 0x712CC |
6702 | #define _PLANE_COLOR_CTL_3_B 0x713CC | 6781 | #define _PLANE_COLOR_CTL_3_B 0x713CC |
@@ -6850,11 +6929,12 @@ enum { | |||
6850 | #define _PS_2B_CTRL 0x68A80 | 6929 | #define _PS_2B_CTRL 0x68A80 |
6851 | #define _PS_1C_CTRL 0x69180 | 6930 | #define _PS_1C_CTRL 0x69180 |
6852 | #define PS_SCALER_EN (1 << 31) | 6931 | #define PS_SCALER_EN (1 << 31) |
6853 | #define PS_SCALER_MODE_MASK (3 << 28) | 6932 | #define SKL_PS_SCALER_MODE_MASK (3 << 28) |
6854 | #define PS_SCALER_MODE_DYN (0 << 28) | 6933 | #define SKL_PS_SCALER_MODE_DYN (0 << 28) |
6855 | #define PS_SCALER_MODE_HQ (1 << 28) | 6934 | #define SKL_PS_SCALER_MODE_HQ (1 << 28) |
6856 | #define SKL_PS_SCALER_MODE_NV12 (2 << 28) | 6935 | #define SKL_PS_SCALER_MODE_NV12 (2 << 28) |
6857 | #define PS_SCALER_MODE_PLANAR (1 << 29) | 6936 | #define PS_SCALER_MODE_PLANAR (1 << 29) |
6937 | #define PS_SCALER_MODE_NORMAL (0 << 29) | ||
6858 | #define PS_PLANE_SEL_MASK (7 << 25) | 6938 | #define PS_PLANE_SEL_MASK (7 << 25) |
6859 | #define PS_PLANE_SEL(plane) (((plane) + 1) << 25) | 6939 | #define PS_PLANE_SEL(plane) (((plane) + 1) << 25) |
6860 | #define PS_FILTER_MASK (3 << 23) | 6940 | #define PS_FILTER_MASK (3 << 23) |
@@ -6871,6 +6951,8 @@ enum { | |||
6871 | #define PS_VADAPT_MODE_LEAST_ADAPT (0 << 5) | 6951 | #define PS_VADAPT_MODE_LEAST_ADAPT (0 << 5) |
6872 | #define PS_VADAPT_MODE_MOD_ADAPT (1 << 5) | 6952 | #define PS_VADAPT_MODE_MOD_ADAPT (1 << 5) |
6873 | #define PS_VADAPT_MODE_MOST_ADAPT (3 << 5) | 6953 | #define PS_VADAPT_MODE_MOST_ADAPT (3 << 5) |
6954 | #define PS_PLANE_Y_SEL_MASK (7 << 5) | ||
6955 | #define PS_PLANE_Y_SEL(plane) (((plane) + 1) << 5) | ||
6874 | 6956 | ||
6875 | #define _PS_PWR_GATE_1A 0x68160 | 6957 | #define _PS_PWR_GATE_1A 0x68160 |
6876 | #define _PS_PWR_GATE_2A 0x68260 | 6958 | #define _PS_PWR_GATE_2A 0x68260 |
@@ -7317,9 +7399,10 @@ enum { | |||
7317 | #define BDW_DPRS_MASK_VBLANK_SRD (1 << 0) | 7399 | #define BDW_DPRS_MASK_VBLANK_SRD (1 << 0) |
7318 | #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) | 7400 | #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) |
7319 | 7401 | ||
7320 | #define CHICKEN_TRANS_A 0x420c0 | 7402 | #define CHICKEN_TRANS_A _MMIO(0x420c0) |
7321 | #define CHICKEN_TRANS_B 0x420c4 | 7403 | #define CHICKEN_TRANS_B _MMIO(0x420c4) |
7322 | #define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B) | 7404 | #define CHICKEN_TRANS_C _MMIO(0x420c8) |
7405 | #define CHICKEN_TRANS_EDP _MMIO(0x420cc) | ||
7323 | #define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */ | 7406 | #define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */ |
7324 | #define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19) | 7407 | #define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19) |
7325 | #define DDI_TRAINING_OVERRIDE_VALUE (1 << 18) | 7408 | #define DDI_TRAINING_OVERRIDE_VALUE (1 << 18) |
@@ -7409,6 +7492,10 @@ enum { | |||
7409 | #define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c) | 7492 | #define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c) |
7410 | #define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11) | 7493 | #define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11) |
7411 | 7494 | ||
7495 | #define GEN7_SARCHKMD _MMIO(0xB000) | ||
7496 | #define GEN7_DISABLE_DEMAND_PREFETCH (1 << 31) | ||
7497 | #define GEN7_DISABLE_SAMPLER_PREFETCH (1 << 30) | ||
7498 | |||
7412 | #define GEN7_L3SQCREG1 _MMIO(0xB010) | 7499 | #define GEN7_L3SQCREG1 _MMIO(0xB010) |
7413 | #define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 | 7500 | #define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 |
7414 | 7501 | ||
@@ -7824,8 +7911,7 @@ enum { | |||
7824 | #define CNP_RAWCLK_DIV_MASK (0x3ff << 16) | 7911 | #define CNP_RAWCLK_DIV_MASK (0x3ff << 16) |
7825 | #define CNP_RAWCLK_DIV(div) ((div) << 16) | 7912 | #define CNP_RAWCLK_DIV(div) ((div) << 16) |
7826 | #define CNP_RAWCLK_FRAC_MASK (0xf << 26) | 7913 | #define CNP_RAWCLK_FRAC_MASK (0xf << 26) |
7827 | #define CNP_RAWCLK_FRAC(frac) ((frac) << 26) | 7914 | #define CNP_RAWCLK_DEN(den) ((den) << 26) |
7828 | #define ICP_RAWCLK_DEN(den) ((den) << 26) | ||
7829 | #define ICP_RAWCLK_NUM(num) ((num) << 11) | 7915 | #define ICP_RAWCLK_NUM(num) ((num) << 11) |
7830 | 7916 | ||
7831 | #define PCH_DPLL_TMR_CFG _MMIO(0xc6208) | 7917 | #define PCH_DPLL_TMR_CFG _MMIO(0xc6208) |
@@ -8625,8 +8711,7 @@ enum { | |||
8625 | #define GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC (1 << 9) | 8711 | #define GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC (1 << 9) |
8626 | #define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7) | 8712 | #define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7) |
8627 | 8713 | ||
8628 | #define GAMW_ECO_DEV_RW_IA_REG _MMIO(0x4080) | 8714 | #define GEN10_SAMPLER_MODE _MMIO(0xE18C) |
8629 | #define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7) | ||
8630 | 8715 | ||
8631 | /* IVYBRIDGE DPF */ | 8716 | /* IVYBRIDGE DPF */ |
8632 | #define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */ | 8717 | #define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */ |
@@ -8927,6 +9012,15 @@ enum skl_power_gate { | |||
8927 | #define CNL_AUX_ANAOVRD1_ENABLE (1 << 16) | 9012 | #define CNL_AUX_ANAOVRD1_ENABLE (1 << 16) |
8928 | #define CNL_AUX_ANAOVRD1_LDO_BYPASS (1 << 23) | 9013 | #define CNL_AUX_ANAOVRD1_LDO_BYPASS (1 << 23) |
8929 | 9014 | ||
9015 | #define _ICL_AUX_REG_IDX(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A) | ||
9016 | #define _ICL_AUX_ANAOVRD1_A 0x162398 | ||
9017 | #define _ICL_AUX_ANAOVRD1_B 0x6C398 | ||
9018 | #define ICL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \ | ||
9019 | _ICL_AUX_ANAOVRD1_A, \ | ||
9020 | _ICL_AUX_ANAOVRD1_B)) | ||
9021 | #define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7) | ||
9022 | #define ICL_AUX_ANAOVRD1_ENABLE (1 << 0) | ||
9023 | |||
8930 | /* HDCP Key Registers */ | 9024 | /* HDCP Key Registers */ |
8931 | #define HDCP_KEY_CONF _MMIO(0x66c00) | 9025 | #define HDCP_KEY_CONF _MMIO(0x66c00) |
8932 | #define HDCP_AKSV_SEND_TRIGGER BIT(31) | 9026 | #define HDCP_AKSV_SEND_TRIGGER BIT(31) |
@@ -9009,11 +9103,45 @@ enum skl_power_gate { | |||
9009 | #define HDCP_STATUS_CIPHER BIT(16) | 9103 | #define HDCP_STATUS_CIPHER BIT(16) |
9010 | #define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff) | 9104 | #define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff) |
9011 | 9105 | ||
9106 | /* HDCP2.2 Registers */ | ||
9107 | #define _PORTA_HDCP2_BASE 0x66800 | ||
9108 | #define _PORTB_HDCP2_BASE 0x66500 | ||
9109 | #define _PORTC_HDCP2_BASE 0x66600 | ||
9110 | #define _PORTD_HDCP2_BASE 0x66700 | ||
9111 | #define _PORTE_HDCP2_BASE 0x66A00 | ||
9112 | #define _PORTF_HDCP2_BASE 0x66900 | ||
9113 | #define _PORT_HDCP2_BASE(port, x) _MMIO(_PICK((port), \ | ||
9114 | _PORTA_HDCP2_BASE, \ | ||
9115 | _PORTB_HDCP2_BASE, \ | ||
9116 | _PORTC_HDCP2_BASE, \ | ||
9117 | _PORTD_HDCP2_BASE, \ | ||
9118 | _PORTE_HDCP2_BASE, \ | ||
9119 | _PORTF_HDCP2_BASE) + (x)) | ||
9120 | |||
9121 | #define HDCP2_AUTH_DDI(port) _PORT_HDCP2_BASE(port, 0x98) | ||
9122 | #define AUTH_LINK_AUTHENTICATED BIT(31) | ||
9123 | #define AUTH_LINK_TYPE BIT(30) | ||
9124 | #define AUTH_FORCE_CLR_INPUTCTR BIT(19) | ||
9125 | #define AUTH_CLR_KEYS BIT(18) | ||
9126 | |||
9127 | #define HDCP2_CTL_DDI(port) _PORT_HDCP2_BASE(port, 0xB0) | ||
9128 | #define CTL_LINK_ENCRYPTION_REQ BIT(31) | ||
9129 | |||
9130 | #define HDCP2_STATUS_DDI(port) _PORT_HDCP2_BASE(port, 0xB4) | ||
9131 | #define STREAM_ENCRYPTION_STATUS_A BIT(31) | ||
9132 | #define STREAM_ENCRYPTION_STATUS_B BIT(30) | ||
9133 | #define STREAM_ENCRYPTION_STATUS_C BIT(29) | ||
9134 | #define LINK_TYPE_STATUS BIT(22) | ||
9135 | #define LINK_AUTH_STATUS BIT(21) | ||
9136 | #define LINK_ENCRYPTION_STATUS BIT(20) | ||
9137 | |||
9012 | /* Per-pipe DDI Function Control */ | 9138 | /* Per-pipe DDI Function Control */ |
9013 | #define _TRANS_DDI_FUNC_CTL_A 0x60400 | 9139 | #define _TRANS_DDI_FUNC_CTL_A 0x60400 |
9014 | #define _TRANS_DDI_FUNC_CTL_B 0x61400 | 9140 | #define _TRANS_DDI_FUNC_CTL_B 0x61400 |
9015 | #define _TRANS_DDI_FUNC_CTL_C 0x62400 | 9141 | #define _TRANS_DDI_FUNC_CTL_C 0x62400 |
9016 | #define _TRANS_DDI_FUNC_CTL_EDP 0x6F400 | 9142 | #define _TRANS_DDI_FUNC_CTL_EDP 0x6F400 |
9143 | #define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400 | ||
9144 | #define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00 | ||
9017 | #define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A) | 9145 | #define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A) |
9018 | 9146 | ||
9019 | #define TRANS_DDI_FUNC_ENABLE (1 << 31) | 9147 | #define TRANS_DDI_FUNC_ENABLE (1 << 31) |
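The HDCP2.2 block above uses the driver's _PICK() pattern: a variadic table indexed by port, plus a fixed per-register offset. A standalone sketch with the port bases copied from the diff (the port enum layout is illustrative):

#include <stdint.h>
#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_F };

/* Per-port HDCP2.2 bases copied from the diff; note B..D sit below A. */
static const uint32_t hdcp2_base[] = {
	[PORT_A] = 0x66800,
	[PORT_B] = 0x66500,
	[PORT_C] = 0x66600,
	[PORT_D] = 0x66700,
	[PORT_E] = 0x66A00,
	[PORT_F] = 0x66900,
};

/* HDCP2_AUTH_DDI(port): base + 0x98, as in _PORT_HDCP2_BASE(port, 0x98). */
static uint32_t hdcp2_auth_ddi(enum port port)
{
	return hdcp2_base[port] + 0x98;
}

int main(void)
{
	printf("HDCP2_AUTH_DDI(PORT_C) = 0x%x\n", hdcp2_auth_ddi(PORT_C));
	return 0;
}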
@@ -9051,6 +9179,19 @@ enum skl_power_gate { | |||
9051 | | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \ | 9179 | | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \ |
9052 | | TRANS_DDI_HDMI_SCRAMBLING) | 9180 | | TRANS_DDI_HDMI_SCRAMBLING) |
9053 | 9181 | ||
9182 | #define _TRANS_DDI_FUNC_CTL2_A 0x60404 | ||
9183 | #define _TRANS_DDI_FUNC_CTL2_B 0x61404 | ||
9184 | #define _TRANS_DDI_FUNC_CTL2_C 0x62404 | ||
9185 | #define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404 | ||
9186 | #define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404 | ||
9187 | #define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04 | ||
9188 | #define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \ | ||
9189 | _TRANS_DDI_FUNC_CTL2_A) | ||
9190 | #define PORT_SYNC_MODE_ENABLE (1 << 4) | ||
9191 | #define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0) | ||
9192 | #define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0) | ||
9193 | #define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0 | ||
9194 | |||
9054 | /* DisplayPort Transport Control */ | 9195 | /* DisplayPort Transport Control */ |
9055 | #define _DP_TP_CTL_A 0x64040 | 9196 | #define _DP_TP_CTL_A 0x64040 |
9056 | #define _DP_TP_CTL_B 0x64140 | 9197 | #define _DP_TP_CTL_B 0x64140 |
@@ -9222,6 +9363,8 @@ enum skl_power_gate { | |||
9222 | #define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC) | 9363 | #define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC) |
9223 | 9364 | ||
9224 | #define TRANS_MSA_SYNC_CLK (1 << 0) | 9365 | #define TRANS_MSA_SYNC_CLK (1 << 0) |
9366 | #define TRANS_MSA_SAMPLING_444 (2 << 1) | ||
9367 | #define TRANS_MSA_CLRSP_YCBCR (2 << 3) | ||
9225 | #define TRANS_MSA_6_BPC (0 << 5) | 9368 | #define TRANS_MSA_6_BPC (0 << 5) |
9226 | #define TRANS_MSA_8_BPC (1 << 5) | 9369 | #define TRANS_MSA_8_BPC (1 << 5) |
9227 | #define TRANS_MSA_10_BPC (2 << 5) | 9370 | #define TRANS_MSA_10_BPC (2 << 5) |
@@ -9789,6 +9932,10 @@ enum skl_power_gate { | |||
9789 | #define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */ | 9932 | #define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */ |
9790 | #define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) | 9933 | #define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) |
9791 | 9934 | ||
9935 | /* Gen11 DSI */ | ||
9936 | #define _MMIO_DSI(tc, dsi0, dsi1) _MMIO_TRANS((tc) - TRANSCODER_DSI_0, \ | ||
9937 | dsi0, dsi1) | ||
9938 | |||
9792 | #define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004) | 9939 | #define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004) |
9793 | #define GLK_TX_ESC_CLK_DIV1_MASK 0x3FF | 9940 | #define GLK_TX_ESC_CLK_DIV1_MASK 0x3FF |
9794 | #define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008) | 9941 | #define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008) |
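The new _MMIO_DSI() helper above rebases a DSI transcoder enum so that TRANSCODER_DSI_0 selects the first register and TRANSCODER_DSI_1 the second. A standalone sketch, collapsing the underlying _MMIO_TRANS() selection to a ternary and using an illustrative enum layout:

#include <stdint.h>
#include <stdio.h>

enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_DSI_0, TRANSCODER_DSI_1 };

/* Select the DSI-0 or DSI-1 register for a DSI transcoder. */
static uint32_t mmio_dsi(enum transcoder tc, uint32_t dsi0, uint32_t dsi1)
{
	return (tc - TRANSCODER_DSI_0) ? dsi1 : dsi0;
}

int main(void)
{
	/* DSI_TRANS_FUNC_CONF for the second DSI transcoder: 0x6b830. */
	printf("0x%x\n", mmio_dsi(TRANSCODER_DSI_1, 0x6b030, 0x6b830));
	return 0;
}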
@@ -9952,6 +10099,39 @@ enum skl_power_gate { | |||
9952 | _ICL_DSI_IO_MODECTL_1) | 10099 | _ICL_DSI_IO_MODECTL_1) |
9953 | #define COMBO_PHY_MODE_DSI (1 << 0) | 10100 | #define COMBO_PHY_MODE_DSI (1 << 0) |
9954 | 10101 | ||
10102 | /* Display Stream Splitter Control */ | ||
10103 | #define DSS_CTL1 _MMIO(0x67400) | ||
10104 | #define SPLITTER_ENABLE (1 << 31) | ||
10105 | #define JOINER_ENABLE (1 << 30) | ||
10106 | #define DUAL_LINK_MODE_INTERLEAVE (1 << 24) | ||
10107 | #define DUAL_LINK_MODE_FRONTBACK (0 << 24) | ||
10108 | #define OVERLAP_PIXELS_MASK (0xf << 16) | ||
10109 | #define OVERLAP_PIXELS(pixels) ((pixels) << 16) | ||
10110 | #define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0) | ||
10111 | #define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0) | ||
10112 | #define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0 | ||
10113 | |||
10114 | #define DSS_CTL2 _MMIO(0x67404) | ||
10115 | #define LEFT_BRANCH_VDSC_ENABLE (1 << 31) | ||
10116 | #define RIGHT_BRANCH_VDSC_ENABLE (1 << 15) | ||
10117 | #define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0) | ||
10118 | #define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0) | ||
10119 | |||
10120 | #define _ICL_PIPE_DSS_CTL1_PB 0x78200 | ||
10121 | #define _ICL_PIPE_DSS_CTL1_PC 0x78400 | ||
10122 | #define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ | ||
10123 | _ICL_PIPE_DSS_CTL1_PB, \ | ||
10124 | _ICL_PIPE_DSS_CTL1_PC) | ||
10125 | #define BIG_JOINER_ENABLE (1 << 29) | ||
10126 | #define MASTER_BIG_JOINER_ENABLE (1 << 28) | ||
10127 | #define VGA_CENTERING_ENABLE (1 << 27) | ||
10128 | |||
10129 | #define _ICL_PIPE_DSS_CTL2_PB 0x78204 | ||
10130 | #define _ICL_PIPE_DSS_CTL2_PC 0x78404 | ||
10131 | #define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ | ||
10132 | _ICL_PIPE_DSS_CTL2_PB, \ | ||
10133 | _ICL_PIPE_DSS_CTL2_PC) | ||
10134 | |||
9955 | #define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020) | 10135 | #define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020) |
9956 | #define STAP_SELECT (1 << 0) | 10136 | #define STAP_SELECT (1 << 0) |
9957 | 10137 | ||
@@ -10288,6 +10468,235 @@ enum skl_power_gate { | |||
10288 | _ICL_DSI_T_INIT_MASTER_0,\ | 10468 | _ICL_DSI_T_INIT_MASTER_0,\ |
10289 | _ICL_DSI_T_INIT_MASTER_1) | 10469 | _ICL_DSI_T_INIT_MASTER_1) |
10290 | 10470 | ||
10471 | #define _DPHY_CLK_TIMING_PARAM_0 0x162180 | ||
10472 | #define _DPHY_CLK_TIMING_PARAM_1 0x6c180 | ||
10473 | #define DPHY_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \ | ||
10474 | _DPHY_CLK_TIMING_PARAM_0,\ | ||
10475 | _DPHY_CLK_TIMING_PARAM_1) | ||
10476 | #define _DSI_CLK_TIMING_PARAM_0 0x6b080 | ||
10477 | #define _DSI_CLK_TIMING_PARAM_1 0x6b880 | ||
10478 | #define DSI_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \ | ||
10479 | _DSI_CLK_TIMING_PARAM_0,\ | ||
10480 | _DSI_CLK_TIMING_PARAM_1) | ||
10481 | #define CLK_PREPARE_OVERRIDE (1 << 31) | ||
10482 | #define CLK_PREPARE(x) ((x) << 28) | ||
10483 | #define CLK_PREPARE_MASK (0x7 << 28) | ||
10484 | #define CLK_PREPARE_SHIFT 28 | ||
10485 | #define CLK_ZERO_OVERRIDE (1 << 27) | ||
10486 | #define CLK_ZERO(x) ((x) << 20) | ||
10487 | #define CLK_ZERO_MASK (0xf << 20) | ||
10488 | #define CLK_ZERO_SHIFT 20 | ||
10489 | #define CLK_PRE_OVERRIDE (1 << 19) | ||
10490 | #define CLK_PRE(x) ((x) << 16) | ||
10491 | #define CLK_PRE_MASK (0x3 << 16) | ||
10492 | #define CLK_PRE_SHIFT 16 | ||
10493 | #define CLK_POST_OVERRIDE (1 << 15) | ||
10494 | #define CLK_POST(x) ((x) << 8) | ||
10495 | #define CLK_POST_MASK (0x7 << 8) | ||
10496 | #define CLK_POST_SHIFT 8 | ||
10497 | #define CLK_TRAIL_OVERRIDE (1 << 7) | ||
10498 | #define CLK_TRAIL(x) ((x) << 0) | ||
10499 | #define CLK_TRAIL_MASK (0xf << 0) | ||
10500 | #define CLK_TRAIL_SHIFT 0 | ||
10501 | |||
10502 | #define _DPHY_DATA_TIMING_PARAM_0 0x162184 | ||
10503 | #define _DPHY_DATA_TIMING_PARAM_1 0x6c184 | ||
10504 | #define DPHY_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \ | ||
10505 | _DPHY_DATA_TIMING_PARAM_0,\ | ||
10506 | _DPHY_DATA_TIMING_PARAM_1) | ||
10507 | #define _DSI_DATA_TIMING_PARAM_0 0x6B084 | ||
10508 | #define _DSI_DATA_TIMING_PARAM_1 0x6B884 | ||
10509 | #define DSI_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \ | ||
10510 | _DSI_DATA_TIMING_PARAM_0,\ | ||
10511 | _DSI_DATA_TIMING_PARAM_1) | ||
10512 | #define HS_PREPARE_OVERRIDE (1 << 31) | ||
10513 | #define HS_PREPARE(x) ((x) << 24) | ||
10514 | #define HS_PREPARE_MASK (0x7 << 24) | ||
10515 | #define HS_PREPARE_SHIFT 24 | ||
10516 | #define HS_ZERO_OVERRIDE (1 << 23) | ||
10517 | #define HS_ZERO(x) ((x) << 16) | ||
10518 | #define HS_ZERO_MASK (0xf << 16) | ||
10519 | #define HS_ZERO_SHIFT 16 | ||
10520 | #define HS_TRAIL_OVERRIDE (1 << 15) | ||
10521 | #define HS_TRAIL(x) ((x) << 8) | ||
10522 | #define HS_TRAIL_MASK (0x7 << 8) | ||
10523 | #define HS_TRAIL_SHIFT 8 | ||
10524 | #define HS_EXIT_OVERRIDE (1 << 7) | ||
10525 | #define HS_EXIT(x) ((x) << 0) | ||
10526 | #define HS_EXIT_MASK (0x7 << 0) | ||
10527 | #define HS_EXIT_SHIFT 0 | ||
10528 | |||
10529 | #define _DPHY_TA_TIMING_PARAM_0 0x162188 | ||
10530 | #define _DPHY_TA_TIMING_PARAM_1 0x6c188 | ||
10531 | #define DPHY_TA_TIMING_PARAM(port) _MMIO_PORT(port, \ | ||
10532 | _DPHY_TA_TIMING_PARAM_0,\ | ||
10533 | _DPHY_TA_TIMING_PARAM_1) | ||
10534 | #define _DSI_TA_TIMING_PARAM_0 0x6b098 | ||
10535 | #define _DSI_TA_TIMING_PARAM_1 0x6b898 | ||
10536 | #define DSI_TA_TIMING_PARAM(port) _MMIO_PORT(port, \ | ||
10537 | _DSI_TA_TIMING_PARAM_0,\ | ||
10538 | _DSI_TA_TIMING_PARAM_1) | ||
10539 | #define TA_SURE_OVERRIDE (1 << 31) | ||
10540 | #define TA_SURE(x) ((x) << 16) | ||
10541 | #define TA_SURE_MASK (0x1f << 16) | ||
10542 | #define TA_SURE_SHIFT 16 | ||
10543 | #define TA_GO_OVERRIDE (1 << 15) | ||
10544 | #define TA_GO(x) ((x) << 8) | ||
10545 | #define TA_GO_MASK (0xf << 8) | ||
10546 | #define TA_GO_SHIFT 8 | ||
10547 | #define TA_GET_OVERRIDE (1 << 7) | ||
10548 | #define TA_GET(x) ((x) << 0) | ||
10549 | #define TA_GET_MASK (0xf << 0) | ||
10550 | #define TA_GET_SHIFT 0 | ||
10551 | |||
10552 | /* DSI transcoder configuration */ | ||
10553 | #define _DSI_TRANS_FUNC_CONF_0 0x6b030 | ||
10554 | #define _DSI_TRANS_FUNC_CONF_1 0x6b830 | ||
10555 | #define DSI_TRANS_FUNC_CONF(tc) _MMIO_DSI(tc, \ | ||
10556 | _DSI_TRANS_FUNC_CONF_0,\ | ||
10557 | _DSI_TRANS_FUNC_CONF_1) | ||
10558 | #define OP_MODE_MASK (0x3 << 28) | ||
10559 | #define OP_MODE_SHIFT 28 | ||
10560 | #define CMD_MODE_NO_GATE (0x0 << 28) | ||
10561 | #define CMD_MODE_TE_GATE (0x1 << 28) | ||
10562 | #define VIDEO_MODE_SYNC_EVENT (0x2 << 28) | ||
10563 | #define VIDEO_MODE_SYNC_PULSE (0x3 << 28) | ||
10564 | #define LINK_READY (1 << 20) | ||
10565 | #define PIX_FMT_MASK (0x7 << 16) | ||
10566 | #define PIX_FMT_SHIFT 16 | ||
10567 | #define PIX_FMT_RGB565 (0x0 << 16) | ||
10568 | #define PIX_FMT_RGB666_PACKED (0x1 << 16) | ||
10569 | #define PIX_FMT_RGB666_LOOSE (0x2 << 16) | ||
10570 | #define PIX_FMT_RGB888 (0x3 << 16) | ||
10571 | #define PIX_FMT_RGB101010 (0x4 << 16) | ||
10572 | #define PIX_FMT_RGB121212 (0x5 << 16) | ||
10573 | #define PIX_FMT_COMPRESSED (0x6 << 16) | ||
10574 | #define BGR_TRANSMISSION (1 << 15) | ||
10575 | #define PIX_VIRT_CHAN(x) ((x) << 12) | ||
10576 | #define PIX_VIRT_CHAN_MASK (0x3 << 12) | ||
10577 | #define PIX_VIRT_CHAN_SHIFT 12 | ||
10578 | #define PIX_BUF_THRESHOLD_MASK (0x3 << 10) | ||
10579 | #define PIX_BUF_THRESHOLD_SHIFT 10 | ||
10580 | #define PIX_BUF_THRESHOLD_1_4 (0x0 << 10) | ||
10581 | #define PIX_BUF_THRESHOLD_1_2 (0x1 << 10) | ||
10582 | #define PIX_BUF_THRESHOLD_3_4 (0x2 << 10) | ||
10583 | #define PIX_BUF_THRESHOLD_FULL (0x3 << 10) | ||
10584 | #define CONTINUOUS_CLK_MASK (0x3 << 8) | ||
10585 | #define CONTINUOUS_CLK_SHIFT 8 | ||
10586 | #define CLK_ENTER_LP_AFTER_DATA (0x0 << 8) | ||
10587 | #define CLK_HS_OR_LP (0x2 << 8) | ||
10588 | #define CLK_HS_CONTINUOUS (0x3 << 8) | ||
10589 | #define LINK_CALIBRATION_MASK (0x3 << 4) | ||
10590 | #define LINK_CALIBRATION_SHIFT 4 | ||
10591 | #define CALIBRATION_DISABLED (0x0 << 4) | ||
10592 | #define CALIBRATION_ENABLED_INITIAL_ONLY (0x2 << 4) | ||
10593 | #define CALIBRATION_ENABLED_INITIAL_PERIODIC (0x3 << 4) | ||
10594 | #define S3D_ORIENTATION_LANDSCAPE (1 << 1) | ||
10595 | #define EOTP_DISABLED (1 << 0) | ||
10596 | |||
10597 | #define _DSI_CMD_RXCTL_0 0x6b0d4 | ||
10598 | #define _DSI_CMD_RXCTL_1 0x6b8d4 | ||
10599 | #define DSI_CMD_RXCTL(tc) _MMIO_DSI(tc, \ | ||
10600 | _DSI_CMD_RXCTL_0,\ | ||
10601 | _DSI_CMD_RXCTL_1) | ||
10602 | #define READ_UNLOADS_DW (1 << 16) | ||
10603 | #define RECEIVED_UNASSIGNED_TRIGGER (1 << 15) | ||
10604 | #define RECEIVED_ACKNOWLEDGE_TRIGGER (1 << 14) | ||
10605 | #define RECEIVED_TEAR_EFFECT_TRIGGER (1 << 13) | ||
10606 | #define RECEIVED_RESET_TRIGGER (1 << 12) | ||
10607 | #define RECEIVED_PAYLOAD_WAS_LOST (1 << 11) | ||
10608 | #define RECEIVED_CRC_WAS_LOST (1 << 10) | ||
10609 | #define NUMBER_RX_PLOAD_DW_MASK (0xff << 0) | ||
10610 | #define NUMBER_RX_PLOAD_DW_SHIFT 0 | ||
10611 | |||
10612 | #define _DSI_CMD_TXCTL_0 0x6b0d0 | ||
10613 | #define _DSI_CMD_TXCTL_1 0x6b8d0 | ||
10614 | #define DSI_CMD_TXCTL(tc) _MMIO_DSI(tc, \ | ||
10615 | _DSI_CMD_TXCTL_0,\ | ||
10616 | _DSI_CMD_TXCTL_1) | ||
10617 | #define KEEP_LINK_IN_HS (1 << 24) | ||
10618 | #define FREE_HEADER_CREDIT_MASK (0x1f << 8) | ||
10619 | #define FREE_HEADER_CREDIT_SHIFT 8 | ||
10620 | #define FREE_PLOAD_CREDIT_MASK (0xff << 0) | ||
10621 | #define FREE_PLOAD_CREDIT_SHIFT 0 | ||
10622 | #define MAX_HEADER_CREDIT 0x10 | ||
10623 | #define MAX_PLOAD_CREDIT 0x40 | ||
10624 | |||
10625 | #define _DSI_CMD_TXHDR_0 0x6b100 | ||
10626 | #define _DSI_CMD_TXHDR_1 0x6b900 | ||
10627 | #define DSI_CMD_TXHDR(tc) _MMIO_DSI(tc, \ | ||
10628 | _DSI_CMD_TXHDR_0,\ | ||
10629 | _DSI_CMD_TXHDR_1) | ||
10630 | #define PAYLOAD_PRESENT (1 << 31) | ||
10631 | #define LP_DATA_TRANSFER (1 << 30) | ||
10632 | #define VBLANK_FENCE (1 << 29) | ||
10633 | #define PARAM_WC_MASK (0xffff << 8) | ||
10634 | #define PARAM_WC_LOWER_SHIFT 8 | ||
10635 | #define PARAM_WC_UPPER_SHIFT 16 | ||
10636 | #define VC_MASK (0x3 << 6) | ||
10637 | #define VC_SHIFT 6 | ||
10638 | #define DT_MASK (0x3f << 0) | ||
10639 | #define DT_SHIFT 0 | ||
10640 | |||
10641 | #define _DSI_CMD_TXPYLD_0 0x6b104 | ||
10642 | #define _DSI_CMD_TXPYLD_1 0x6b904 | ||
10643 | #define DSI_CMD_TXPYLD(tc) _MMIO_DSI(tc, \ | ||
10644 | _DSI_CMD_TXPYLD_0,\ | ||
10645 | _DSI_CMD_TXPYLD_1) | ||
10646 | |||
10647 | #define _DSI_LP_MSG_0 0x6b0d8 | ||
10648 | #define _DSI_LP_MSG_1 0x6b8d8 | ||
10649 | #define DSI_LP_MSG(tc) _MMIO_DSI(tc, \ | ||
10650 | _DSI_LP_MSG_0,\ | ||
10651 | _DSI_LP_MSG_1) | ||
10652 | #define LPTX_IN_PROGRESS (1 << 17) | ||
10653 | #define LINK_IN_ULPS (1 << 16) | ||
10654 | #define LINK_ULPS_TYPE_LP11 (1 << 8) | ||
10655 | #define LINK_ENTER_ULPS (1 << 0) | ||
10656 | |||
10657 | /* DSI timeout registers */ | ||
10658 | #define _DSI_HSTX_TO_0 0x6b044 | ||
10659 | #define _DSI_HSTX_TO_1 0x6b844 | ||
10660 | #define DSI_HSTX_TO(tc) _MMIO_DSI(tc, \ | ||
10661 | _DSI_HSTX_TO_0,\ | ||
10662 | _DSI_HSTX_TO_1) | ||
10663 | #define HSTX_TIMEOUT_VALUE_MASK (0xffff << 16) | ||
10664 | #define HSTX_TIMEOUT_VALUE_SHIFT 16 | ||
10665 | #define HSTX_TIMEOUT_VALUE(x) ((x) << 16) | ||
10666 | #define HSTX_TIMED_OUT (1 << 0) | ||
10667 | |||
10668 | #define _DSI_LPRX_HOST_TO_0 0x6b048 | ||
10669 | #define _DSI_LPRX_HOST_TO_1 0x6b848 | ||
10670 | #define DSI_LPRX_HOST_TO(tc) _MMIO_DSI(tc, \ | ||
10671 | _DSI_LPRX_HOST_TO_0,\ | ||
10672 | _DSI_LPRX_HOST_TO_1) | ||
10673 | #define LPRX_TIMED_OUT (1 << 16) | ||
10674 | #define LPRX_TIMEOUT_VALUE_MASK (0xffff << 0) | ||
10675 | #define LPRX_TIMEOUT_VALUE_SHIFT 0 | ||
10676 | #define LPRX_TIMEOUT_VALUE(x) ((x) << 0) | ||
10677 | |||
10678 | #define _DSI_PWAIT_TO_0 0x6b040 | ||
10679 | #define _DSI_PWAIT_TO_1 0x6b840 | ||
10680 | #define DSI_PWAIT_TO(tc) _MMIO_DSI(tc, \ | ||
10681 | _DSI_PWAIT_TO_0,\ | ||
10682 | _DSI_PWAIT_TO_1) | ||
10683 | #define PRESET_TIMEOUT_VALUE_MASK (0xffff << 16) | ||
10684 | #define PRESET_TIMEOUT_VALUE_SHIFT 16 | ||
10685 | #define PRESET_TIMEOUT_VALUE(x) ((x) << 16) | ||
10686 | #define PRESPONSE_TIMEOUT_VALUE_MASK (0xffff << 0) | ||
10687 | #define PRESPONSE_TIMEOUT_VALUE_SHIFT 0 | ||
10688 | #define PRESPONSE_TIMEOUT_VALUE(x) ((x) << 0) | ||
10689 | |||
10690 | #define _DSI_TA_TO_0 0x6b04c | ||
10691 | #define _DSI_TA_TO_1 0x6b84c | ||
10692 | #define DSI_TA_TO(tc) _MMIO_DSI(tc, \ | ||
10693 | _DSI_TA_TO_0,\ | ||
10694 | _DSI_TA_TO_1) | ||
10695 | #define TA_TIMED_OUT (1 << 16) | ||
10696 | #define TA_TIMEOUT_VALUE_MASK (0xffff << 0) | ||
10697 | #define TA_TIMEOUT_VALUE_SHIFT 0 | ||
10698 | #define TA_TIMEOUT_VALUE(x) ((x) << 0) | ||
10699 | |||
10291 | /* bits 31:0 */ | 10700 | /* bits 31:0 */ |
10292 | #define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084) | 10701 | #define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084) |
10293 | #define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884) | 10702 | #define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884) |
@@ -10400,10 +10809,6 @@ enum skl_power_gate { | |||
10400 | #define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID) | 10809 | #define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID) |
10401 | #define READ_DATA_VALID(n) (1 << (n)) | 10810 | #define READ_DATA_VALID(n) (1 << (n)) |
10402 | 10811 | ||
10403 | /* For UMS only (deprecated): */ | ||
10404 | #define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000) | ||
10405 | #define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800) | ||
10406 | |||
10407 | /* MOCS (Memory Object Control State) registers */ | 10812 | /* MOCS (Memory Object Control State) registers */ |
10408 | #define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */ | 10813 | #define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */ |
10409 | 10814 | ||
@@ -10689,6 +11094,7 @@ enum skl_power_gate { | |||
10689 | #define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ | 11094 | #define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ |
10690 | _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \ | 11095 | _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \ |
10691 | _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC) | 11096 | _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC) |
11097 | #define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20) | ||
10692 | #define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16) | 11098 | #define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16) |
10693 | #define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0) | 11099 | #define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0) |
10694 | 11100 | ||
@@ -10743,17 +11149,17 @@ enum skl_power_gate { | |||
10743 | _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \ | 11149 | _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \ |
10744 | _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC) | 11150 | _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC) |
10745 | 11151 | ||
10746 | #define PORT_TX_DFLEXDPSP _MMIO(0x1638A0) | 11152 | #define PORT_TX_DFLEXDPSP _MMIO(FIA1_BASE + 0x008A0) |
10747 | #define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6)) | 11153 | #define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6)) |
10748 | #define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5)) | 11154 | #define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5)) |
10749 | #define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8) | 11155 | #define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8) |
10750 | #define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8)) | 11156 | #define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8)) |
10751 | #define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8)) | 11157 | #define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8)) |
10752 | 11158 | ||
10753 | #define PORT_TX_DFLEXDPPMS _MMIO(0x163890) | 11159 | #define PORT_TX_DFLEXDPPMS _MMIO(FIA1_BASE + 0x00890) |
10754 | #define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port)) | 11160 | #define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port)) |
10755 | 11161 | ||
10756 | #define PORT_TX_DFLEXDPCSSS _MMIO(0x163894) | 11162 | #define PORT_TX_DFLEXDPCSSS _MMIO(FIA1_BASE + 0x00894) |
10757 | #define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port)) | 11163 | #define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port)) |
10758 | 11164 | ||
10759 | #endif /* _I915_REG_H_ */ | 11165 | #endif /* _I915_REG_H_ */ |
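Most of the *_MASK/value macro pairs added throughout this header exist to support the driver's read-modify-write idiom: clear the field with the mask, then OR in the encoded value. A standalone sketch using the CLK_PREPARE field from the DSI timing block above (the register read is simulated):

#include <stdint.h>
#include <stdio.h>

/* CLK_PREPARE field of the DSI clock timing register, copied from the diff. */
#define CLK_PREPARE_MASK (0x7u << 28)
#define CLK_PREPARE(x)   ((uint32_t)(x) << 28)

int main(void)
{
	uint32_t tmp = 0xffffffff;	/* pretend I915_READ() result */

	tmp &= ~CLK_PREPARE_MASK;	/* clear the field */
	tmp |= CLK_PREPARE(5);		/* program a 5-cycle clock prepare */
	printf("0x%08x\n", tmp);	/* prints: 0x5fffffff */
	return 0;
}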
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index a492385b2089..71107540581d 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c | |||
@@ -111,91 +111,6 @@ i915_request_remove_from_client(struct i915_request *request) | |||
111 | spin_unlock(&file_priv->mm.lock); | 111 | spin_unlock(&file_priv->mm.lock); |
112 | } | 112 | } |
113 | 113 | ||
114 | static struct i915_dependency * | ||
115 | i915_dependency_alloc(struct drm_i915_private *i915) | ||
116 | { | ||
117 | return kmem_cache_alloc(i915->dependencies, GFP_KERNEL); | ||
118 | } | ||
119 | |||
120 | static void | ||
121 | i915_dependency_free(struct drm_i915_private *i915, | ||
122 | struct i915_dependency *dep) | ||
123 | { | ||
124 | kmem_cache_free(i915->dependencies, dep); | ||
125 | } | ||
126 | |||
127 | static void | ||
128 | __i915_sched_node_add_dependency(struct i915_sched_node *node, | ||
129 | struct i915_sched_node *signal, | ||
130 | struct i915_dependency *dep, | ||
131 | unsigned long flags) | ||
132 | { | ||
133 | INIT_LIST_HEAD(&dep->dfs_link); | ||
134 | list_add(&dep->wait_link, &signal->waiters_list); | ||
135 | list_add(&dep->signal_link, &node->signalers_list); | ||
136 | dep->signaler = signal; | ||
137 | dep->flags = flags; | ||
138 | } | ||
139 | |||
140 | static int | ||
141 | i915_sched_node_add_dependency(struct drm_i915_private *i915, | ||
142 | struct i915_sched_node *node, | ||
143 | struct i915_sched_node *signal) | ||
144 | { | ||
145 | struct i915_dependency *dep; | ||
146 | |||
147 | dep = i915_dependency_alloc(i915); | ||
148 | if (!dep) | ||
149 | return -ENOMEM; | ||
150 | |||
151 | __i915_sched_node_add_dependency(node, signal, dep, | ||
152 | I915_DEPENDENCY_ALLOC); | ||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | static void | ||
157 | i915_sched_node_fini(struct drm_i915_private *i915, | ||
158 | struct i915_sched_node *node) | ||
159 | { | ||
160 | struct i915_dependency *dep, *tmp; | ||
161 | |||
162 | GEM_BUG_ON(!list_empty(&node->link)); | ||
163 | |||
164 | /* | ||
165 | * Everyone we depended upon (the fences we wait to be signaled) | ||
166 | * should retire before us and remove themselves from our list. | ||
167 | * However, retirement is run independently on each timeline and | ||
168 | * so we may be called out-of-order. | ||
169 | */ | ||
170 | list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) { | ||
171 | GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler)); | ||
172 | GEM_BUG_ON(!list_empty(&dep->dfs_link)); | ||
173 | |||
174 | list_del(&dep->wait_link); | ||
175 | if (dep->flags & I915_DEPENDENCY_ALLOC) | ||
176 | i915_dependency_free(i915, dep); | ||
177 | } | ||
178 | |||
179 | /* Remove ourselves from everyone who depends upon us */ | ||
180 | list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) { | ||
181 | GEM_BUG_ON(dep->signaler != node); | ||
182 | GEM_BUG_ON(!list_empty(&dep->dfs_link)); | ||
183 | |||
184 | list_del(&dep->signal_link); | ||
185 | if (dep->flags & I915_DEPENDENCY_ALLOC) | ||
186 | i915_dependency_free(i915, dep); | ||
187 | } | ||
188 | } | ||
189 | |||
190 | static void | ||
191 | i915_sched_node_init(struct i915_sched_node *node) | ||
192 | { | ||
193 | INIT_LIST_HEAD(&node->signalers_list); | ||
194 | INIT_LIST_HEAD(&node->waiters_list); | ||
195 | INIT_LIST_HEAD(&node->link); | ||
196 | node->attr.priority = I915_PRIORITY_INVALID; | ||
197 | } | ||
198 | |||
199 | static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) | 114 | static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) |
200 | { | 115 | { |
201 | struct intel_engine_cs *engine; | 116 | struct intel_engine_cs *engine; |
@@ -221,6 +136,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) | |||
221 | intel_engine_get_seqno(engine), | 136 | intel_engine_get_seqno(engine), |
222 | seqno); | 137 | seqno); |
223 | 138 | ||
139 | kthread_park(engine->breadcrumbs.signaler); | ||
140 | |||
224 | if (!i915_seqno_passed(seqno, engine->timeline.seqno)) { | 141 | if (!i915_seqno_passed(seqno, engine->timeline.seqno)) { |
225 | /* Flush any waiters before we reuse the seqno */ | 142 | /* Flush any waiters before we reuse the seqno */ |
226 | intel_engine_disarm_breadcrumbs(engine); | 143 | intel_engine_disarm_breadcrumbs(engine); |
@@ -235,6 +152,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) | |||
235 | /* Finally reset hw state */ | 152 | /* Finally reset hw state */ |
236 | intel_engine_init_global_seqno(engine, seqno); | 153 | intel_engine_init_global_seqno(engine, seqno); |
237 | engine->timeline.seqno = seqno; | 154 | engine->timeline.seqno = seqno; |
155 | |||
156 | kthread_unpark(engine->breadcrumbs.signaler); | ||
238 | } | 157 | } |
239 | 158 | ||
240 | list_for_each_entry(timeline, &i915->gt.timelines, link) | 159 | list_for_each_entry(timeline, &i915->gt.timelines, link) |
@@ -740,17 +659,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) | |||
740 | if (rq) | 659 | if (rq) |
741 | cond_synchronize_rcu(rq->rcustate); | 660 | cond_synchronize_rcu(rq->rcustate); |
742 | 661 | ||
743 | /* | ||
744 | * We've forced the client to stall and catch up with whatever | ||
745 | * backlog there might have been. As we are assuming that we | ||
746 | * caused the mempressure, now is an opportune time to | ||
747 | * recover as much memory from the request pool as is possible. | ||
748 | * Having already penalized the client to stall, we spend | ||
749 | * a little extra time to re-optimise page allocation. | ||
750 | */ | ||
751 | kmem_cache_shrink(i915->requests); | ||
752 | rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */ | ||
753 | |||
754 | rq = kmem_cache_alloc(i915->requests, GFP_KERNEL); | 662 | rq = kmem_cache_alloc(i915->requests, GFP_KERNEL); |
755 | if (!rq) { | 663 | if (!rq) { |
756 | ret = -ENOMEM; | 664 | ret = -ENOMEM; |
@@ -1127,8 +1035,20 @@ void i915_request_add(struct i915_request *request) | |||
1127 | */ | 1035 | */ |
1128 | local_bh_disable(); | 1036 | local_bh_disable(); |
1129 | rcu_read_lock(); /* RCU serialisation for set-wedged protection */ | 1037 | rcu_read_lock(); /* RCU serialisation for set-wedged protection */ |
1130 | if (engine->schedule) | 1038 | if (engine->schedule) { |
1131 | engine->schedule(request, &request->gem_context->sched); | 1039 | struct i915_sched_attr attr = request->gem_context->sched; |
1040 | |||
1041 | /* | ||
1042 | * Boost priorities to new clients (new request flows). | ||
1043 | * | ||
1044 | * Allow interactive/synchronous clients to jump ahead of | ||
1045 | * the bulk clients. (FQ_CODEL) | ||
1046 | */ | ||
1047 | if (!prev || i915_request_completed(prev)) | ||
1048 | attr.priority |= I915_PRIORITY_NEWCLIENT; | ||
1049 | |||
1050 | engine->schedule(request, &attr); | ||
1051 | } | ||
1132 | rcu_read_unlock(); | 1052 | rcu_read_unlock(); |
1133 | i915_sw_fence_commit(&request->submit); | 1053 | i915_sw_fence_commit(&request->submit); |
1134 | local_bh_enable(); /* Kick the execlists tasklet if just scheduled */ | 1054 | local_bh_enable(); /* Kick the execlists tasklet if just scheduled */ |
@@ -1310,6 +1230,8 @@ long i915_request_wait(struct i915_request *rq, | |||
1310 | add_wait_queue(errq, &reset); | 1230 | add_wait_queue(errq, &reset); |
1311 | 1231 | ||
1312 | intel_wait_init(&wait); | 1232 | intel_wait_init(&wait); |
1233 | if (flags & I915_WAIT_PRIORITY) | ||
1234 | i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT); | ||
1313 | 1235 | ||
1314 | restart: | 1236 | restart: |
1315 | do { | 1237 | do { |
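The i915_request_add() hunk above adds an FQ_CODEL-inspired boost: a client whose previous request has already completed is treated as interactive, and its new request gets a small priority bonus so it can jump ahead of bulk clients. A standalone sketch of that decision with simplified stand-ins for the i915 structures (the actual value of I915_PRIORITY_NEWCLIENT is not shown in this diff):

#include <stdbool.h>
#include <stdio.h>

#define PRIORITY_NEWCLIENT 1	/* stand-in; the real flag is defined elsewhere */

struct toy_request {
	int priority;
	bool prev_completed;	/* models !prev || i915_request_completed(prev) */
};

/* Boost brand-new request flows so interactive clients jump the bulk queue. */
static int effective_priority(const struct toy_request *rq)
{
	int prio = rq->priority;

	if (rq->prev_completed)
		prio |= PRIORITY_NEWCLIENT;
	return prio;
}

int main(void)
{
	struct toy_request rq = { .priority = 0, .prev_completed = true };

	printf("queued with priority %d\n", effective_priority(&rq));
	return 0;
}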
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 7fa94b024968..90e9d170a0cd 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h | |||
@@ -277,8 +277,9 @@ long i915_request_wait(struct i915_request *rq, | |||
277 | __attribute__((nonnull(1))); | 277 | __attribute__((nonnull(1))); |
278 | #define I915_WAIT_INTERRUPTIBLE BIT(0) | 278 | #define I915_WAIT_INTERRUPTIBLE BIT(0) |
279 | #define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */ | 279 | #define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */ |
280 | #define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */ | 280 | #define I915_WAIT_PRIORITY BIT(2) /* small priority bump for the request */ |
281 | #define I915_WAIT_FOR_IDLE_BOOST BIT(3) | 281 | #define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */ |
282 | #define I915_WAIT_FOR_IDLE_BOOST BIT(4) | ||
282 | 283 | ||
283 | static inline bool intel_engine_has_started(struct intel_engine_cs *engine, | 284 | static inline bool intel_engine_has_started(struct intel_engine_cs *engine, |
284 | u32 seqno); | 285 | u32 seqno); |
@@ -332,14 +333,6 @@ static inline bool i915_request_completed(const struct i915_request *rq) | |||
332 | return __i915_request_completed(rq, seqno); | 333 | return __i915_request_completed(rq, seqno); |
333 | } | 334 | } |
334 | 335 | ||
335 | static inline bool i915_sched_node_signaled(const struct i915_sched_node *node) | ||
336 | { | ||
337 | const struct i915_request *rq = | ||
338 | container_of(node, const struct i915_request, sched); | ||
339 | |||
340 | return i915_request_completed(rq); | ||
341 | } | ||
342 | |||
343 | void i915_retire_requests(struct drm_i915_private *i915); | 336 | void i915_retire_requests(struct drm_i915_private *i915); |
344 | 337 | ||
345 | /* | 338 | /* |
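The scheduler code consolidated into the new i915_scheduler.c below replaces recursive priority propagation with an iterative walk: dependencies are appended to a flat work list, then the new priority is applied in reverse (topological) order so that dependencies always execute first. A standalone toy version of that walk (fixed-size array, no deduplication of shared dependencies, which the real list-based code gets for free):

#include <stdio.h>

#define MAX_NODES 16	/* toy bound; the kernel uses an unbounded list */

struct node {
	int prio;
	int nr_deps;
	struct node *deps[4];
};

static void bump_priority(struct node *root, int prio)
{
	struct node *list[MAX_NODES];
	int head, tail = 0;

	/* Flatten the dependency DAG into a work list (duplicates tolerated). */
	list[tail++] = root;
	for (head = 0; head < tail && tail < MAX_NODES; head++) {
		struct node *n = list[head];

		for (int i = 0; i < n->nr_deps && tail < MAX_NODES; i++)
			if (n->deps[i]->prio < prio)
				list[tail++] = n->deps[i];
	}

	/* The last element must execute first: apply the bump in reverse. */
	while (tail--)
		if (list[tail]->prio < prio)
			list[tail]->prio = prio;
}

int main(void)
{
	struct node a = { .prio = 0 };
	struct node b = { .prio = 0, .nr_deps = 1, .deps = { &a } };
	struct node c = { .prio = 0, .nr_deps = 2, .deps = { &a, &b } };

	bump_priority(&c, 5);
	printf("%d %d %d\n", a.prio, b.prio, c.prio);	/* prints: 5 5 5 */
	return 0;
}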
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c new file mode 100644 index 000000000000..340faea6c08a --- /dev/null +++ b/drivers/gpu/drm/i915/i915_scheduler.c | |||
@@ -0,0 +1,399 @@ | |||
1 | /* | ||
2 | * SPDX-License-Identifier: MIT | ||
3 | * | ||
4 | * Copyright © 2018 Intel Corporation | ||
5 | */ | ||
6 | |||
7 | #include <linux/mutex.h> | ||
8 | |||
9 | #include "i915_drv.h" | ||
10 | #include "i915_request.h" | ||
11 | #include "i915_scheduler.h" | ||
12 | |||
13 | static DEFINE_SPINLOCK(schedule_lock); | ||
14 | |||
15 | static const struct i915_request * | ||
16 | node_to_request(const struct i915_sched_node *node) | ||
17 | { | ||
18 | return container_of(node, const struct i915_request, sched); | ||
19 | } | ||
20 | |||
21 | static inline bool node_signaled(const struct i915_sched_node *node) | ||
22 | { | ||
23 | return i915_request_completed(node_to_request(node)); | ||
24 | } | ||
25 | |||
26 | void i915_sched_node_init(struct i915_sched_node *node) | ||
27 | { | ||
28 | INIT_LIST_HEAD(&node->signalers_list); | ||
29 | INIT_LIST_HEAD(&node->waiters_list); | ||
30 | INIT_LIST_HEAD(&node->link); | ||
31 | node->attr.priority = I915_PRIORITY_INVALID; | ||
32 | } | ||
33 | |||
34 | static struct i915_dependency * | ||
35 | i915_dependency_alloc(struct drm_i915_private *i915) | ||
36 | { | ||
37 | return kmem_cache_alloc(i915->dependencies, GFP_KERNEL); | ||
38 | } | ||
39 | |||
40 | static void | ||
41 | i915_dependency_free(struct drm_i915_private *i915, | ||
42 | struct i915_dependency *dep) | ||
43 | { | ||
44 | kmem_cache_free(i915->dependencies, dep); | ||
45 | } | ||
46 | |||
47 | bool __i915_sched_node_add_dependency(struct i915_sched_node *node, | ||
48 | struct i915_sched_node *signal, | ||
49 | struct i915_dependency *dep, | ||
50 | unsigned long flags) | ||
51 | { | ||
52 | bool ret = false; | ||
53 | |||
54 | spin_lock(&schedule_lock); | ||
55 | |||
56 | if (!node_signaled(signal)) { | ||
57 | INIT_LIST_HEAD(&dep->dfs_link); | ||
58 | list_add(&dep->wait_link, &signal->waiters_list); | ||
59 | list_add(&dep->signal_link, &node->signalers_list); | ||
60 | dep->signaler = signal; | ||
61 | dep->flags = flags; | ||
62 | |||
63 | ret = true; | ||
64 | } | ||
65 | |||
66 | spin_unlock(&schedule_lock); | ||
67 | |||
68 | return ret; | ||
69 | } | ||
70 | |||
71 | int i915_sched_node_add_dependency(struct drm_i915_private *i915, | ||
72 | struct i915_sched_node *node, | ||
73 | struct i915_sched_node *signal) | ||
74 | { | ||
75 | struct i915_dependency *dep; | ||
76 | |||
77 | dep = i915_dependency_alloc(i915); | ||
78 | if (!dep) | ||
79 | return -ENOMEM; | ||
80 | |||
81 | if (!__i915_sched_node_add_dependency(node, signal, dep, | ||
82 | I915_DEPENDENCY_ALLOC)) | ||
83 | i915_dependency_free(i915, dep); | ||
84 | |||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | void i915_sched_node_fini(struct drm_i915_private *i915, | ||
89 | struct i915_sched_node *node) | ||
90 | { | ||
91 | struct i915_dependency *dep, *tmp; | ||
92 | |||
93 | GEM_BUG_ON(!list_empty(&node->link)); | ||
94 | |||
95 | spin_lock(&schedule_lock); | ||
96 | |||
97 | /* | ||
98 | * Everyone we depended upon (the fences we wait to be signaled) | ||
99 | * should retire before us and remove themselves from our list. | ||
100 | * However, retirement is run independently on each timeline and | ||
101 | * so we may be called out-of-order. | ||
102 | */ | ||
103 | list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) { | ||
104 | GEM_BUG_ON(!node_signaled(dep->signaler)); | ||
105 | GEM_BUG_ON(!list_empty(&dep->dfs_link)); | ||
106 | |||
107 | list_del(&dep->wait_link); | ||
108 | if (dep->flags & I915_DEPENDENCY_ALLOC) | ||
109 | i915_dependency_free(i915, dep); | ||
110 | } | ||
111 | |||
112 | /* Remove ourselves from everyone who depends upon us */ | ||
113 | list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) { | ||
114 | GEM_BUG_ON(dep->signaler != node); | ||
115 | GEM_BUG_ON(!list_empty(&dep->dfs_link)); | ||
116 | |||
117 | list_del(&dep->signal_link); | ||
118 | if (dep->flags & I915_DEPENDENCY_ALLOC) | ||
119 | i915_dependency_free(i915, dep); | ||
120 | } | ||
121 | |||
122 | spin_unlock(&schedule_lock); | ||
123 | } | ||
124 | |||
125 | static inline struct i915_priolist *to_priolist(struct rb_node *rb) | ||
126 | { | ||
127 | return rb_entry(rb, struct i915_priolist, node); | ||
128 | } | ||
129 | |||
130 | static void assert_priolists(struct intel_engine_execlists * const execlists, | ||
131 | long queue_priority) | ||
132 | { | ||
133 | struct rb_node *rb; | ||
134 | long last_prio, i; | ||
135 | |||
136 | if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) | ||
137 | return; | ||
138 | |||
139 | GEM_BUG_ON(rb_first_cached(&execlists->queue) != | ||
140 | rb_first(&execlists->queue.rb_root)); | ||
141 | |||
142 | last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1; | ||
143 | for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { | ||
144 | const struct i915_priolist *p = to_priolist(rb); | ||
145 | |||
146 | GEM_BUG_ON(p->priority >= last_prio); | ||
147 | last_prio = p->priority; | ||
148 | |||
149 | GEM_BUG_ON(!p->used); | ||
150 | for (i = 0; i < ARRAY_SIZE(p->requests); i++) { | ||
151 | if (list_empty(&p->requests[i])) | ||
152 | continue; | ||
153 | |||
154 | GEM_BUG_ON(!(p->used & BIT(i))); | ||
155 | } | ||
156 | } | ||
157 | } | ||
158 | |||
159 | struct list_head * | ||
160 | i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio) | ||
161 | { | ||
162 | struct intel_engine_execlists * const execlists = &engine->execlists; | ||
163 | struct i915_priolist *p; | ||
164 | struct rb_node **parent, *rb; | ||
165 | bool first = true; | ||
166 | int idx, i; | ||
167 | |||
168 | lockdep_assert_held(&engine->timeline.lock); | ||
169 | assert_priolists(execlists, INT_MAX); | ||
170 | |||
171 | /* buckets sorted from highest [in slot 0] to lowest priority */ | ||
172 | idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1; | ||
173 | prio >>= I915_USER_PRIORITY_SHIFT; | ||
174 | if (unlikely(execlists->no_priolist)) | ||
175 | prio = I915_PRIORITY_NORMAL; | ||
176 | |||
177 | find_priolist: | ||
178 | /* most positive priority is scheduled first, equal priorities fifo */ | ||
179 | rb = NULL; | ||
180 | parent = &execlists->queue.rb_root.rb_node; | ||
181 | while (*parent) { | ||
182 | rb = *parent; | ||
183 | p = to_priolist(rb); | ||
184 | if (prio > p->priority) { | ||
185 | parent = &rb->rb_left; | ||
186 | } else if (prio < p->priority) { | ||
187 | parent = &rb->rb_right; | ||
188 | first = false; | ||
189 | } else { | ||
190 | goto out; | ||
191 | } | ||
192 | } | ||
193 | |||
194 | if (prio == I915_PRIORITY_NORMAL) { | ||
195 | p = &execlists->default_priolist; | ||
196 | } else { | ||
197 | p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC); | ||
198 | /* Convert an allocation failure to a priority bump */ | ||
199 | if (unlikely(!p)) { | ||
200 | prio = I915_PRIORITY_NORMAL; /* recurses just once */ | ||
201 | |||
202 | /* To maintain ordering with all rendering, after an | ||
203 | * allocation failure we have to disable all scheduling. | ||
204 | * Requests will then be executed in fifo, and schedule | ||
205 | * will ensure that dependencies are emitted in fifo. | ||
206 | * There will still be some reordering with existing | ||
207 | * requests, so if userspace lied about their | ||
208 | * dependencies that reordering may be visible. | ||
209 | */ | ||
210 | execlists->no_priolist = true; | ||
211 | goto find_priolist; | ||
212 | } | ||
213 | } | ||
214 | |||
215 | p->priority = prio; | ||
216 | for (i = 0; i < ARRAY_SIZE(p->requests); i++) | ||
217 | INIT_LIST_HEAD(&p->requests[i]); | ||
218 | rb_link_node(&p->node, rb, parent); | ||
219 | rb_insert_color_cached(&p->node, &execlists->queue, first); | ||
220 | p->used = 0; | ||
221 | |||
222 | out: | ||
223 | p->used |= BIT(idx); | ||
224 | return &p->requests[idx]; | ||
225 | } | ||
226 | |||
227 | static struct intel_engine_cs * | ||
228 | sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked) | ||
229 | { | ||
230 | struct intel_engine_cs *engine = node_to_request(node)->engine; | ||
231 | |||
232 | GEM_BUG_ON(!locked); | ||
233 | |||
234 | if (engine != locked) { | ||
235 | spin_unlock(&locked->timeline.lock); | ||
236 | spin_lock(&engine->timeline.lock); | ||
237 | } | ||
238 | |||
239 | return engine; | ||
240 | } | ||
241 | |||
242 | static void __i915_schedule(struct i915_request *rq, | ||
243 | const struct i915_sched_attr *attr) | ||
244 | { | ||
245 | struct list_head *uninitialized_var(pl); | ||
246 | struct intel_engine_cs *engine, *last; | ||
247 | struct i915_dependency *dep, *p; | ||
248 | struct i915_dependency stack; | ||
249 | const int prio = attr->priority; | ||
250 | LIST_HEAD(dfs); | ||
251 | |||
252 | /* Needed in order to use the temporary link inside i915_dependency */ | ||
253 | lockdep_assert_held(&schedule_lock); | ||
254 | GEM_BUG_ON(prio == I915_PRIORITY_INVALID); | ||
255 | |||
256 | if (i915_request_completed(rq)) | ||
257 | return; | ||
258 | |||
259 | if (prio <= READ_ONCE(rq->sched.attr.priority)) | ||
260 | return; | ||
261 | |||
262 | stack.signaler = &rq->sched; | ||
263 | list_add(&stack.dfs_link, &dfs); | ||
264 | |||
265 | /* | ||
266 | * Recursively bump all dependent priorities to match the new request. | ||
267 | * | ||
268 | * A naive approach would be to use recursion: | ||
269 | * static void update_priorities(struct i915_sched_node *node, prio) { | ||
270 | * list_for_each_entry(dep, &node->signalers_list, signal_link) | ||
271 | * update_priorities(dep->signal, prio) | ||
272 | * queue_request(node); | ||
273 | * } | ||
274 | * but that may have unlimited recursion depth and so runs a very | ||
275 | * real risk of overrunning the kernel stack. Instead, we build | ||
276 | * a flat list of all dependencies starting with the current request. | ||
277 | * As we walk the list, we add the dependencies of each entry | ||
278 | * to the end of the list (this may include an already visited | ||
279 | * request) and continue walking onwards over the new dependencies. The | ||
280 | * end result is a topological list of requests in reverse order; the | ||
281 | * last element in the list is the request we must execute first. | ||
282 | */ | ||
283 | list_for_each_entry(dep, &dfs, dfs_link) { | ||
284 | struct i915_sched_node *node = dep->signaler; | ||
285 | |||
286 | /* | ||
287 | * Within an engine, there can be no cycle, but we may | ||
288 | * refer to the same dependency chain multiple times | ||
289 | * (redundant dependencies are not eliminated) and across | ||
290 | * engines. | ||
291 | */ | ||
292 | list_for_each_entry(p, &node->signalers_list, signal_link) { | ||
293 | GEM_BUG_ON(p == dep); /* no cycles! */ | ||
294 | |||
295 | if (node_signaled(p->signaler)) | ||
296 | continue; | ||
297 | |||
298 | GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority); | ||
299 | if (prio > READ_ONCE(p->signaler->attr.priority)) | ||
300 | list_move_tail(&p->dfs_link, &dfs); | ||
301 | } | ||
302 | } | ||
303 | |||
304 | /* | ||
305 | * If we didn't need to bump any existing priorities, and we haven't | ||
306 | * yet submitted this request (i.e. there is no potential race with | ||
307 | * execlists_submit_request()), we can set our own priority and skip | ||
308 | * acquiring the engine locks. | ||
309 | */ | ||
310 | if (rq->sched.attr.priority == I915_PRIORITY_INVALID) { | ||
311 | GEM_BUG_ON(!list_empty(&rq->sched.link)); | ||
312 | rq->sched.attr = *attr; | ||
313 | |||
314 | if (stack.dfs_link.next == stack.dfs_link.prev) | ||
315 | return; | ||
316 | |||
317 | __list_del_entry(&stack.dfs_link); | ||
318 | } | ||
319 | |||
320 | last = NULL; | ||
321 | engine = rq->engine; | ||
322 | spin_lock_irq(&engine->timeline.lock); | ||
323 | |||
324 | /* Fifo and depth-first replacement ensure our deps execute before us */ | ||
325 | list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) { | ||
326 | struct i915_sched_node *node = dep->signaler; | ||
327 | |||
328 | INIT_LIST_HEAD(&dep->dfs_link); | ||
329 | |||
330 | engine = sched_lock_engine(node, engine); | ||
331 | |||
332 | /* Recheck after acquiring the engine->timeline.lock */ | ||
333 | if (prio <= node->attr.priority || node_signaled(node)) | ||
334 | continue; | ||
335 | |||
336 | node->attr.priority = prio; | ||
337 | if (!list_empty(&node->link)) { | ||
338 | if (last != engine) { | ||
339 | pl = i915_sched_lookup_priolist(engine, prio); | ||
340 | last = engine; | ||
341 | } | ||
342 | list_move_tail(&node->link, pl); | ||
343 | } else { | ||
344 | /* | ||
345 | * If the request is not in the priolist queue because | ||
346 | * it is not yet runnable, then it doesn't contribute | ||
347 | * to our preemption decisions. On the other hand, | ||
348 | * if the request is on the HW, it too is not in the | ||
349 | * queue; but in that case we may still need to reorder | ||
350 | * the inflight requests. | ||
351 | */ | ||
352 | if (!i915_sw_fence_done(&node_to_request(node)->submit)) | ||
353 | continue; | ||
354 | } | ||
355 | |||
356 | if (prio <= engine->execlists.queue_priority) | ||
357 | continue; | ||
358 | |||
359 | /* | ||
360 | * If we are already the currently executing context, don't | ||
361 | * bother evaluating if we should preempt ourselves. | ||
362 | */ | ||
363 | if (node_to_request(node)->global_seqno && | ||
364 | i915_seqno_passed(port_request(engine->execlists.port)->global_seqno, | ||
365 | node_to_request(node)->global_seqno)) | ||
366 | continue; | ||
367 | |||
368 | /* Defer (tasklet) submission until after all of our updates. */ | ||
369 | engine->execlists.queue_priority = prio; | ||
370 | tasklet_hi_schedule(&engine->execlists.tasklet); | ||
371 | } | ||
372 | |||
373 | spin_unlock_irq(&engine->timeline.lock); | ||
374 | } | ||
375 | |||
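The comment inside __i915_schedule() describes the stack-safe walk; here is a simplified, self-contained sketch of the same technique. The types are hypothetical stand-ins for i915_sched_node/i915_dependency, and like the real code it relies on list_move_tail() rather than a visited flag, because a node reached again through a later path must be pushed behind its new dependent:

	#include <linux/list.h>

	struct node {
		struct list_head dfs_link;	/* scratch link, starts empty */
		struct node *deps[8];
		int ndeps;
	};

	static void build_topo_list(struct node *root, struct list_head *dfs)
	{
		struct node *n;
		int i;

		list_add(&root->dfs_link, dfs);

		/* The list grows and reorders while we walk it, so stack
		 * depth stays O(1) no matter how deep the dependency DAG. */
		list_for_each_entry(n, dfs, dfs_link) {
			for (i = 0; i < n->ndeps; i++)
				/* Re-moving an already listed node to the
				 * tail keeps every dependency behind all of
				 * its dependents. */
				list_move_tail(&n->deps[i]->dfs_link, dfs);
		}
		/* The reverse order of dfs is now a valid execution order. */
	}

Termination relies on the graph being acyclic (the GEM_BUG_ON(p == dep) above); the driver additionally skips nodes whose priority is already high enough, which bounds how often a node can be re-moved to the tail.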
376 | void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr) | ||
377 | { | ||
378 | spin_lock(&schedule_lock); | ||
379 | __i915_schedule(rq, attr); | ||
380 | spin_unlock(&schedule_lock); | ||
381 | } | ||
382 | |||
383 | void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump) | ||
384 | { | ||
385 | struct i915_sched_attr attr; | ||
386 | |||
387 | GEM_BUG_ON(bump & ~I915_PRIORITY_MASK); | ||
388 | |||
389 | if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID) | ||
390 | return; | ||
391 | |||
392 | spin_lock_bh(&schedule_lock); | ||
393 | |||
394 | attr = rq->sched.attr; | ||
395 | attr.priority |= bump; | ||
396 | __i915_schedule(rq, &attr); | ||
397 | |||
398 | spin_unlock_bh(&schedule_lock); | ||
399 | } | ||
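A hypothetical call site for the bump path (the bonus bit comes from the header below; the exact caller is not shown in this hunk):

	/* Nudge a request a client is actively waiting on up within
	 * its user priority level. */
	i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);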
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h index 70a42220358d..dbe9cb7ecd82 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.h +++ b/drivers/gpu/drm/i915/i915_scheduler.h | |||
@@ -8,9 +8,14 @@ | |||
8 | #define _I915_SCHEDULER_H_ | 8 | #define _I915_SCHEDULER_H_ |
9 | 9 | ||
10 | #include <linux/bitops.h> | 10 | #include <linux/bitops.h> |
11 | #include <linux/kernel.h> | ||
11 | 12 | ||
12 | #include <uapi/drm/i915_drm.h> | 13 | #include <uapi/drm/i915_drm.h> |
13 | 14 | ||
15 | struct drm_i915_private; | ||
16 | struct i915_request; | ||
17 | struct intel_engine_cs; | ||
18 | |||
14 | enum { | 19 | enum { |
15 | I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1, | 20 | I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1, |
16 | I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY, | 21 | I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY, |
@@ -19,6 +24,15 @@ enum { | |||
19 | I915_PRIORITY_INVALID = INT_MIN | 24 | I915_PRIORITY_INVALID = INT_MIN |
20 | }; | 25 | }; |
21 | 26 | ||
27 | #define I915_USER_PRIORITY_SHIFT 2 | ||
28 | #define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT) | ||
29 | |||
30 | #define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT) | ||
31 | #define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1) | ||
32 | |||
33 | #define I915_PRIORITY_WAIT ((u8)BIT(0)) | ||
34 | #define I915_PRIORITY_NEWCLIENT ((u8)BIT(1)) | ||
35 | |||
22 | struct i915_sched_attr { | 36 | struct i915_sched_attr { |
23 | /** | 37 | /** |
24 | * @priority: execution and service priority | 38 | * @priority: execution and service priority |
@@ -69,4 +83,26 @@ struct i915_dependency { | |||
69 | #define I915_DEPENDENCY_ALLOC BIT(0) | 83 | #define I915_DEPENDENCY_ALLOC BIT(0) |
70 | }; | 84 | }; |
71 | 85 | ||
86 | void i915_sched_node_init(struct i915_sched_node *node); | ||
87 | |||
88 | bool __i915_sched_node_add_dependency(struct i915_sched_node *node, | ||
89 | struct i915_sched_node *signal, | ||
90 | struct i915_dependency *dep, | ||
91 | unsigned long flags); | ||
92 | |||
93 | int i915_sched_node_add_dependency(struct drm_i915_private *i915, | ||
94 | struct i915_sched_node *node, | ||
95 | struct i915_sched_node *signal); | ||
96 | |||
97 | void i915_sched_node_fini(struct drm_i915_private *i915, | ||
98 | struct i915_sched_node *node); | ||
99 | |||
100 | void i915_schedule(struct i915_request *request, | ||
101 | const struct i915_sched_attr *attr); | ||
102 | |||
103 | void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump); | ||
104 | |||
105 | struct list_head * | ||
106 | i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio); | ||
107 | |||
72 | #endif /* _I915_SCHEDULER_H_ */ | 108 | #endif /* _I915_SCHEDULER_H_ */ |
diff --git a/drivers/gpu/drm/i915/i915_syncmap.c b/drivers/gpu/drm/i915/i915_syncmap.c index 58f8d0cc125c..60404dbb2e9f 100644 --- a/drivers/gpu/drm/i915/i915_syncmap.c +++ b/drivers/gpu/drm/i915/i915_syncmap.c | |||
@@ -92,7 +92,7 @@ void i915_syncmap_init(struct i915_syncmap **root) | |||
92 | { | 92 | { |
93 | BUILD_BUG_ON_NOT_POWER_OF_2(KSYNCMAP); | 93 | BUILD_BUG_ON_NOT_POWER_OF_2(KSYNCMAP); |
94 | BUILD_BUG_ON_NOT_POWER_OF_2(SHIFT); | 94 | BUILD_BUG_ON_NOT_POWER_OF_2(SHIFT); |
95 | BUILD_BUG_ON(KSYNCMAP > BITS_PER_BYTE * sizeof((*root)->bitmap)); | 95 | BUILD_BUG_ON(KSYNCMAP > BITS_PER_TYPE((*root)->bitmap)); |
96 | *root = NULL; | 96 | *root = NULL; |
97 | } | 97 | } |
98 | 98 | ||
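The conversion above is mechanical: assuming the usual definition of BITS_PER_TYPE() from <linux/bitops.h>,

	#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)

the new BUILD_BUG_ON() expands to exactly the expression it replaces.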
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h index a2c2c3ab5fb0..ebd71b487220 100644 --- a/drivers/gpu/drm/i915/i915_timeline.h +++ b/drivers/gpu/drm/i915/i915_timeline.h | |||
@@ -83,6 +83,25 @@ void i915_timeline_init(struct drm_i915_private *i915, | |||
83 | const char *name); | 83 | const char *name); |
84 | void i915_timeline_fini(struct i915_timeline *tl); | 84 | void i915_timeline_fini(struct i915_timeline *tl); |
85 | 85 | ||
86 | static inline void | ||
87 | i915_timeline_set_subclass(struct i915_timeline *timeline, | ||
88 | unsigned int subclass) | ||
89 | { | ||
90 | lockdep_set_subclass(&timeline->lock, subclass); | ||
91 | |||
92 | /* | ||
93 | * Due to an interesting quirk in lockdep's internal debug tracking, | ||
94 | * after setting a subclass we must ensure the lock is used. Otherwise, | ||
95 | * nr_unused_locks is incremented once too often. | ||
96 | */ | ||
97 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
98 | local_irq_disable(); | ||
99 | lock_map_acquire(&timeline->lock.dep_map); | ||
100 | lock_map_release(&timeline->lock.dep_map); | ||
101 | local_irq_enable(); | ||
102 | #endif | ||
103 | } | ||
104 | |||
86 | struct i915_timeline * | 105 | struct i915_timeline * |
87 | i915_timeline_create(struct drm_i915_private *i915, const char *name); | 106 | i915_timeline_create(struct drm_i915_private *i915, const char *name); |
88 | 107 | ||
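A hypothetical caller of the new helper, giving engine timelines a distinct lockdep subclass so that code such as sched_lock_engine() above can nest one timeline lock inside another without a false-positive deadlock report (the TIMELINE_ENGINE constant is illustrative, not taken from this hunk):

	enum { TIMELINE_CLIENT, TIMELINE_ENGINE };	/* illustrative */

	i915_timeline_init(i915, &engine->timeline, engine->name);
	i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);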
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 395dd2511568..5858a43e19da 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h | |||
@@ -68,7 +68,7 @@ | |||
68 | 68 | ||
69 | /* Note we don't consider signbits :| */ | 69 | /* Note we don't consider signbits :| */ |
70 | #define overflows_type(x, T) \ | 70 | #define overflows_type(x, T) \ |
71 | (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE)) | 71 | (sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T)) |
72 | 72 | ||
73 | #define ptr_mask_bits(ptr, n) ({ \ | 73 | #define ptr_mask_bits(ptr, n) ({ \ |
74 | unsigned long __v = (unsigned long)(ptr); \ | 74 | unsigned long __v = (unsigned long)(ptr); \ |
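The behaviour of overflows_type() is unchanged, since BITS_PER_TYPE(T) equals sizeof(T) * BITS_PER_BYTE; a hypothetical use, rejecting a u64 that cannot be narrowed into a u32 field (compute_size() and obj->size32 are illustrative):

	u64 size = compute_size();

	if (overflows_type(size, u32))
		return -E2BIG;
	obj->size32 = size;	/* now known to fit */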
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 35fce4c88629..5b4d78cdb4ca 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c | |||
@@ -305,12 +305,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, | |||
305 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); | 305 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); |
306 | GEM_BUG_ON(vma->size > vma->node.size); | 306 | GEM_BUG_ON(vma->size > vma->node.size); |
307 | 307 | ||
308 | if (GEM_WARN_ON(range_overflows(vma->node.start, | 308 | if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start, |
309 | vma->node.size, | 309 | vma->node.size, |
310 | vma->vm->total))) | 310 | vma->vm->total))) |
311 | return -ENODEV; | 311 | return -ENODEV; |
312 | 312 | ||
313 | if (GEM_WARN_ON(!flags)) | 313 | if (GEM_DEBUG_WARN_ON(!flags)) |
314 | return -EINVAL; | 314 | return -EINVAL; |
315 | 315 | ||
316 | bind_flags = 0; | 316 | bind_flags = 0; |
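The switch from GEM_WARN_ON() to GEM_DEBUG_WARN_ON() makes both sanity checks debug-only; the presumed definition elsewhere in this series is

	#ifdef CONFIG_DRM_I915_DEBUG_GEM
	#define GEM_DEBUG_WARN_ON(expr) GEM_WARN_ON(expr)
	#else
	#define GEM_DEBUG_WARN_ON(expr) ({ BUILD_BUG_ON_INVALID(expr); 0; })
	#endif

so without CONFIG_DRM_I915_DEBUG_GEM the conditions evaluate to 0 and i915_vma_bind() no longer returns -ENODEV/-EINVAL for them in production builds.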
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c index 13830e43a4d1..01f422df8c23 100644 --- a/drivers/gpu/drm/i915/icl_dsi.c +++ b/drivers/gpu/drm/i915/icl_dsi.c | |||
@@ -25,8 +25,153 @@ | |||
25 | * Jani Nikula <jani.nikula@intel.com> | 25 | * Jani Nikula <jani.nikula@intel.com> |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <drm/drm_mipi_dsi.h> | ||
28 | #include "intel_dsi.h" | 29 | #include "intel_dsi.h" |
29 | 30 | ||
31 | static inline int header_credits_available(struct drm_i915_private *dev_priv, | ||
32 | enum transcoder dsi_trans) | ||
33 | { | ||
34 | return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK) | ||
35 | >> FREE_HEADER_CREDIT_SHIFT; | ||
36 | } | ||
37 | |||
38 | static inline int payload_credits_available(struct drm_i915_private *dev_priv, | ||
39 | enum transcoder dsi_trans) | ||
40 | { | ||
41 | return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK) | ||
42 | >> FREE_PLOAD_CREDIT_SHIFT; | ||
43 | } | ||
44 | |||
45 | static void wait_for_header_credits(struct drm_i915_private *dev_priv, | ||
46 | enum transcoder dsi_trans) | ||
47 | { | ||
48 | if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >= | ||
49 | MAX_HEADER_CREDIT, 100)) | ||
50 | DRM_ERROR("DSI header credits not released\n"); | ||
51 | } | ||
52 | |||
53 | static void wait_for_payload_credits(struct drm_i915_private *dev_priv, | ||
54 | enum transcoder dsi_trans) | ||
55 | { | ||
56 | if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >= | ||
57 | MAX_PLOAD_CREDIT, 100)) | ||
58 | DRM_ERROR("DSI payload credits not released\n"); | ||
59 | } | ||
60 | |||
61 | static enum transcoder dsi_port_to_transcoder(enum port port) | ||
62 | { | ||
63 | if (port == PORT_A) | ||
64 | return TRANSCODER_DSI_0; | ||
65 | else | ||
66 | return TRANSCODER_DSI_1; | ||
67 | } | ||
68 | |||
69 | static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) | ||
70 | { | ||
71 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
72 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
73 | struct mipi_dsi_device *dsi; | ||
74 | enum port port; | ||
75 | enum transcoder dsi_trans; | ||
76 | int ret; | ||
77 | |||
78 | /* wait for header/payload credits to be released */ | ||
79 | for_each_dsi_port(port, intel_dsi->ports) { | ||
80 | dsi_trans = dsi_port_to_transcoder(port); | ||
81 | wait_for_header_credits(dev_priv, dsi_trans); | ||
82 | wait_for_payload_credits(dev_priv, dsi_trans); | ||
83 | } | ||
84 | |||
85 | /* send nop DCS command */ | ||
86 | for_each_dsi_port(port, intel_dsi->ports) { | ||
87 | dsi = intel_dsi->dsi_hosts[port]->device; | ||
88 | dsi->mode_flags |= MIPI_DSI_MODE_LPM; | ||
89 | dsi->channel = 0; | ||
90 | ret = mipi_dsi_dcs_nop(dsi); | ||
91 | if (ret < 0) | ||
92 | DRM_ERROR("error sending DCS NOP command\n"); | ||
93 | } | ||
94 | |||
95 | /* wait for header credits to be released */ | ||
96 | for_each_dsi_port(port, intel_dsi->ports) { | ||
97 | dsi_trans = dsi_port_to_transcoder(port); | ||
98 | wait_for_header_credits(dev_priv, dsi_trans); | ||
99 | } | ||
100 | |||
101 | /* wait for LP TX in progress bit to be cleared */ | ||
102 | for_each_dsi_port(port, intel_dsi->ports) { | ||
103 | dsi_trans = dsi_port_to_transcoder(port); | ||
104 | if (wait_for_us(!(I915_READ(DSI_LP_MSG(dsi_trans)) & | ||
105 | LPTX_IN_PROGRESS), 20)) | ||
106 | DRM_ERROR("LPTX bit not cleared\n"); | ||
107 | } | ||
108 | } | ||
109 | |||
110 | static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) | ||
111 | { | ||
112 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
113 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
114 | enum port port; | ||
115 | u32 tmp; | ||
116 | int lane; | ||
117 | |||
118 | for_each_dsi_port(port, intel_dsi->ports) { | ||
119 | |||
120 | /* | ||
121 | * Program voltage swing and pre-emphasis level values as per | ||
122 | * table in BSPEC under DDI buffer programing | ||
123 | */ | ||
124 | tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port)); | ||
125 | tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); | ||
126 | tmp |= SCALING_MODE_SEL(0x2); | ||
127 | tmp |= TAP2_DISABLE | TAP3_DISABLE; | ||
128 | tmp |= RTERM_SELECT(0x6); | ||
129 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp); | ||
130 | |||
131 | tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port)); | ||
132 | tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); | ||
133 | tmp |= SCALING_MODE_SEL(0x2); | ||
134 | tmp |= TAP2_DISABLE | TAP3_DISABLE; | ||
135 | tmp |= RTERM_SELECT(0x6); | ||
136 | I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp); | ||
137 | |||
138 | tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port)); | ||
139 | tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | | ||
140 | RCOMP_SCALAR_MASK); | ||
141 | tmp |= SWING_SEL_UPPER(0x2); | ||
142 | tmp |= SWING_SEL_LOWER(0x2); | ||
143 | tmp |= RCOMP_SCALAR(0x98); | ||
144 | I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp); | ||
145 | |||
146 | tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port)); | ||
147 | tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | | ||
148 | RCOMP_SCALAR_MASK); | ||
149 | tmp |= SWING_SEL_UPPER(0x2); | ||
150 | tmp |= SWING_SEL_LOWER(0x2); | ||
151 | tmp |= RCOMP_SCALAR(0x98); | ||
152 | I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp); | ||
153 | |||
154 | tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port)); | ||
155 | tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | | ||
156 | CURSOR_COEFF_MASK); | ||
157 | tmp |= POST_CURSOR_1(0x0); | ||
158 | tmp |= POST_CURSOR_2(0x0); | ||
159 | tmp |= CURSOR_COEFF(0x3f); | ||
160 | I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp); | ||
161 | |||
162 | for (lane = 0; lane <= 3; lane++) { | ||
163 | /* Bspec: must not use GRP register for write */ | ||
164 | tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane)); | ||
165 | tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | | ||
166 | CURSOR_COEFF_MASK); | ||
167 | tmp |= POST_CURSOR_1(0x0); | ||
168 | tmp |= POST_CURSOR_2(0x0); | ||
169 | tmp |= CURSOR_COEFF(0x3f); | ||
170 | I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp); | ||
171 | } | ||
172 | } | ||
173 | } | ||
174 | |||
30 | static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder) | 175 | static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder) |
31 | { | 176 | { |
32 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 177 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
@@ -105,10 +250,553 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder) | |||
105 | } | 250 | } |
106 | } | 251 | } |
107 | 252 | ||
108 | static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder) | 253 | static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) |
254 | { | ||
255 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
256 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
257 | enum port port; | ||
258 | u32 tmp; | ||
259 | int lane; | ||
260 | |||
261 | /* Step 4b(i) set loadgen select for transmit and aux lanes */ | ||
262 | for_each_dsi_port(port, intel_dsi->ports) { | ||
263 | tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port)); | ||
264 | tmp &= ~LOADGEN_SELECT; | ||
265 | I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp); | ||
266 | for (lane = 0; lane <= 3; lane++) { | ||
267 | tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane)); | ||
268 | tmp &= ~LOADGEN_SELECT; | ||
269 | if (lane != 2) | ||
270 | tmp |= LOADGEN_SELECT; | ||
271 | I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp); | ||
272 | } | ||
273 | } | ||
274 | |||
275 | /* Step 4b(ii) set latency optimization for transmit and aux lanes */ | ||
276 | for_each_dsi_port(port, intel_dsi->ports) { | ||
277 | tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port)); | ||
278 | tmp &= ~FRC_LATENCY_OPTIM_MASK; | ||
279 | tmp |= FRC_LATENCY_OPTIM_VAL(0x5); | ||
280 | I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp); | ||
281 | tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port)); | ||
282 | tmp &= ~FRC_LATENCY_OPTIM_MASK; | ||
283 | tmp |= FRC_LATENCY_OPTIM_VAL(0x5); | ||
284 | I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp); | ||
285 | } | ||
286 | |||
287 | } | ||
288 | |||
289 | static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) | ||
290 | { | ||
291 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
292 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
293 | u32 tmp; | ||
294 | enum port port; | ||
295 | |||
296 | /* clear common keeper enable bit */ | ||
297 | for_each_dsi_port(port, intel_dsi->ports) { | ||
298 | tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port)); | ||
299 | tmp &= ~COMMON_KEEPER_EN; | ||
300 | I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp); | ||
301 | tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port)); | ||
302 | tmp &= ~COMMON_KEEPER_EN; | ||
303 | I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp); | ||
304 | } | ||
305 | |||
306 | /* | ||
307 | * Set SUS Clock Config bitfield to 11b | ||
308 | * Note: loadgen select program is done | ||
309 | * as part of lane phy sequence configuration | ||
310 | */ | ||
311 | for_each_dsi_port(port, intel_dsi->ports) { | ||
312 | tmp = I915_READ(ICL_PORT_CL_DW5(port)); | ||
313 | tmp |= SUS_CLOCK_CONFIG; | ||
314 | I915_WRITE(ICL_PORT_CL_DW5(port), tmp); | ||
315 | } | ||
316 | |||
317 | /* Clear training enable to change swing values */ | ||
318 | for_each_dsi_port(port, intel_dsi->ports) { | ||
319 | tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port)); | ||
320 | tmp &= ~TX_TRAINING_EN; | ||
321 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp); | ||
322 | tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port)); | ||
323 | tmp &= ~TX_TRAINING_EN; | ||
324 | I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp); | ||
325 | } | ||
326 | |||
327 | /* Program swing and de-emphasis */ | ||
328 | dsi_program_swing_and_deemphasis(encoder); | ||
329 | |||
330 | /* Set training enable to trigger update */ | ||
331 | for_each_dsi_port(port, intel_dsi->ports) { | ||
332 | tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port)); | ||
333 | tmp |= TX_TRAINING_EN; | ||
334 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp); | ||
335 | tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port)); | ||
336 | tmp |= TX_TRAINING_EN; | ||
337 | I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp); | ||
338 | } | ||
339 | } | ||
340 | |||
341 | static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder) | ||
342 | { | ||
343 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
344 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
345 | u32 tmp; | ||
346 | enum port port; | ||
347 | |||
348 | for_each_dsi_port(port, intel_dsi->ports) { | ||
349 | tmp = I915_READ(DDI_BUF_CTL(port)); | ||
350 | tmp |= DDI_BUF_CTL_ENABLE; | ||
351 | I915_WRITE(DDI_BUF_CTL(port), tmp); | ||
352 | |||
353 | if (wait_for_us(!(I915_READ(DDI_BUF_CTL(port)) & | ||
354 | DDI_BUF_IS_IDLE), | ||
355 | 500)) | ||
356 | DRM_ERROR("DDI port:%c buffer idle\n", port_name(port)); | ||
357 | } | ||
358 | } | ||
359 | |||
360 | static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder) | ||
361 | { | ||
362 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
363 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
364 | u32 tmp; | ||
365 | enum port port; | ||
366 | |||
367 | /* Program T-INIT master registers */ | ||
368 | for_each_dsi_port(port, intel_dsi->ports) { | ||
369 | tmp = I915_READ(ICL_DSI_T_INIT_MASTER(port)); | ||
370 | tmp &= ~MASTER_INIT_TIMER_MASK; | ||
371 | tmp |= intel_dsi->init_count; | ||
372 | I915_WRITE(ICL_DSI_T_INIT_MASTER(port), tmp); | ||
373 | } | ||
374 | |||
375 | /* Program DPHY clock lanes timings */ | ||
376 | for_each_dsi_port(port, intel_dsi->ports) { | ||
377 | I915_WRITE(DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg); | ||
378 | |||
379 | /* shadow register inside display core */ | ||
380 | I915_WRITE(DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg); | ||
381 | } | ||
382 | |||
383 | /* Program DPHY data lanes timings */ | ||
384 | for_each_dsi_port(port, intel_dsi->ports) { | ||
385 | I915_WRITE(DPHY_DATA_TIMING_PARAM(port), | ||
386 | intel_dsi->dphy_data_lane_reg); | ||
387 | |||
388 | /* shadow register inside display core */ | ||
389 | I915_WRITE(DSI_DATA_TIMING_PARAM(port), | ||
390 | intel_dsi->dphy_data_lane_reg); | ||
391 | } | ||
392 | |||
393 | /* | ||
394 | * If the DSI link is operating at or below 800 MHz, | ||
395 | * TA_SURE should be overridden and programmed to | ||
396 | * a value of '0' inside TA_PARAM_REGISTERS; otherwise | ||
397 | * leave all fields at HW default values. | ||
398 | */ | ||
399 | if (intel_dsi_bitrate(intel_dsi) <= 800000) { | ||
400 | for_each_dsi_port(port, intel_dsi->ports) { | ||
401 | tmp = I915_READ(DPHY_TA_TIMING_PARAM(port)); | ||
402 | tmp &= ~TA_SURE_MASK; | ||
403 | tmp |= TA_SURE_OVERRIDE | TA_SURE(0); | ||
404 | I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp); | ||
405 | |||
406 | /* shadow register inside display core */ | ||
407 | tmp = I915_READ(DSI_TA_TIMING_PARAM(port)); | ||
408 | tmp &= ~TA_SURE_MASK; | ||
409 | tmp |= TA_SURE_OVERRIDE | TA_SURE(0); | ||
410 | I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp); | ||
411 | } | ||
412 | } | ||
413 | } | ||
414 | |||
415 | static void | ||
416 | gen11_dsi_configure_transcoder(struct intel_encoder *encoder, | ||
417 | const struct intel_crtc_state *pipe_config) | ||
418 | { | ||
419 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
420 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
421 | struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); | ||
422 | enum pipe pipe = intel_crtc->pipe; | ||
423 | u32 tmp; | ||
424 | enum port port; | ||
425 | enum transcoder dsi_trans; | ||
426 | |||
427 | for_each_dsi_port(port, intel_dsi->ports) { | ||
428 | dsi_trans = dsi_port_to_transcoder(port); | ||
429 | tmp = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)); | ||
430 | |||
431 | if (intel_dsi->eotp_pkt) | ||
432 | tmp &= ~EOTP_DISABLED; | ||
433 | else | ||
434 | tmp |= EOTP_DISABLED; | ||
435 | |||
436 | /* enable link calibration if freq >= 1.5 Gbps */ | ||
437 | if (intel_dsi_bitrate(intel_dsi) >= 1500 * 1000) { | ||
438 | tmp &= ~LINK_CALIBRATION_MASK; | ||
439 | tmp |= CALIBRATION_ENABLED_INITIAL_ONLY; | ||
440 | } | ||
441 | |||
442 | /* configure continuous clock */ | ||
443 | tmp &= ~CONTINUOUS_CLK_MASK; | ||
444 | if (intel_dsi->clock_stop) | ||
445 | tmp |= CLK_ENTER_LP_AFTER_DATA; | ||
446 | else | ||
447 | tmp |= CLK_HS_CONTINUOUS; | ||
448 | |||
449 | /* configure buffer threshold limit to minimum */ | ||
450 | tmp &= ~PIX_BUF_THRESHOLD_MASK; | ||
451 | tmp |= PIX_BUF_THRESHOLD_1_4; | ||
452 | |||
453 | /* set virtual channel to '0' */ | ||
454 | tmp &= ~PIX_VIRT_CHAN_MASK; | ||
455 | tmp |= PIX_VIRT_CHAN(0); | ||
456 | |||
457 | /* program BGR transmission */ | ||
458 | if (intel_dsi->bgr_enabled) | ||
459 | tmp |= BGR_TRANSMISSION; | ||
460 | |||
461 | /* select pixel format */ | ||
462 | tmp &= ~PIX_FMT_MASK; | ||
463 | switch (intel_dsi->pixel_format) { | ||
464 | default: | ||
465 | MISSING_CASE(intel_dsi->pixel_format); | ||
466 | /* fallthrough */ | ||
467 | case MIPI_DSI_FMT_RGB565: | ||
468 | tmp |= PIX_FMT_RGB565; | ||
469 | break; | ||
470 | case MIPI_DSI_FMT_RGB666_PACKED: | ||
471 | tmp |= PIX_FMT_RGB666_PACKED; | ||
472 | break; | ||
473 | case MIPI_DSI_FMT_RGB666: | ||
474 | tmp |= PIX_FMT_RGB666_LOOSE; | ||
475 | break; | ||
476 | case MIPI_DSI_FMT_RGB888: | ||
477 | tmp |= PIX_FMT_RGB888; | ||
478 | break; | ||
479 | } | ||
480 | |||
481 | /* program DSI operation mode */ | ||
482 | if (is_vid_mode(intel_dsi)) { | ||
483 | tmp &= ~OP_MODE_MASK; | ||
484 | switch (intel_dsi->video_mode_format) { | ||
485 | default: | ||
486 | MISSING_CASE(intel_dsi->video_mode_format); | ||
487 | /* fallthrough */ | ||
488 | case VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS: | ||
489 | tmp |= VIDEO_MODE_SYNC_EVENT; | ||
490 | break; | ||
491 | case VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE: | ||
492 | tmp |= VIDEO_MODE_SYNC_PULSE; | ||
493 | break; | ||
494 | } | ||
495 | } | ||
496 | |||
497 | I915_WRITE(DSI_TRANS_FUNC_CONF(dsi_trans), tmp); | ||
498 | } | ||
499 | |||
500 | /* enable port sync mode if dual link */ | ||
501 | if (intel_dsi->dual_link) { | ||
502 | for_each_dsi_port(port, intel_dsi->ports) { | ||
503 | dsi_trans = dsi_port_to_transcoder(port); | ||
504 | tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans)); | ||
505 | tmp |= PORT_SYNC_MODE_ENABLE; | ||
506 | I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp); | ||
507 | } | ||
508 | |||
509 | /* TODO: configure DSS_CTL1 */ | ||
510 | } | ||
511 | |||
512 | for_each_dsi_port(port, intel_dsi->ports) { | ||
513 | dsi_trans = dsi_port_to_transcoder(port); | ||
514 | |||
515 | /* select data lane width */ | ||
516 | tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans)); | ||
517 | tmp &= ~DDI_PORT_WIDTH_MASK; | ||
518 | tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count); | ||
519 | |||
520 | /* select input pipe */ | ||
521 | tmp &= ~TRANS_DDI_EDP_INPUT_MASK; | ||
522 | switch (pipe) { | ||
523 | default: | ||
524 | MISSING_CASE(pipe); | ||
525 | /* fallthrough */ | ||
526 | case PIPE_A: | ||
527 | tmp |= TRANS_DDI_EDP_INPUT_A_ON; | ||
528 | break; | ||
529 | case PIPE_B: | ||
530 | tmp |= TRANS_DDI_EDP_INPUT_B_ONOFF; | ||
531 | break; | ||
532 | case PIPE_C: | ||
533 | tmp |= TRANS_DDI_EDP_INPUT_C_ONOFF; | ||
534 | break; | ||
535 | } | ||
536 | |||
537 | /* enable transcoder DDI function */ | ||
538 | tmp |= TRANS_DDI_FUNC_ENABLE; | ||
539 | I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp); | ||
540 | } | ||
541 | |||
542 | /* wait for link ready */ | ||
543 | for_each_dsi_port(port, intel_dsi->ports) { | ||
544 | dsi_trans = dsi_port_to_transcoder(port); | ||
545 | if (wait_for_us((I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)) & | ||
546 | LINK_READY), 2500)) | ||
547 | DRM_ERROR("DSI link not ready\n"); | ||
548 | } | ||
549 | } | ||
550 | |||
551 | static void | ||
552 | gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, | ||
553 | const struct intel_crtc_state *pipe_config) | ||
554 | { | ||
555 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
556 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
557 | const struct drm_display_mode *adjusted_mode = | ||
558 | &pipe_config->base.adjusted_mode; | ||
559 | enum port port; | ||
560 | enum transcoder dsi_trans; | ||
561 | /* horizontal timings */ | ||
562 | u16 htotal, hactive, hsync_start, hsync_end, hsync_size; | ||
563 | u16 hfront_porch, hback_porch; | ||
564 | /* vertical timings */ | ||
565 | u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift; | ||
566 | |||
567 | hactive = adjusted_mode->crtc_hdisplay; | ||
568 | htotal = adjusted_mode->crtc_htotal; | ||
569 | hsync_start = adjusted_mode->crtc_hsync_start; | ||
570 | hsync_end = adjusted_mode->crtc_hsync_end; | ||
571 | hsync_size = hsync_end - hsync_start; | ||
572 | hfront_porch = (adjusted_mode->crtc_hsync_start - | ||
573 | adjusted_mode->crtc_hdisplay); | ||
574 | hback_porch = (adjusted_mode->crtc_htotal - | ||
575 | adjusted_mode->crtc_hsync_end); | ||
576 | vactive = adjusted_mode->crtc_vdisplay; | ||
577 | vtotal = adjusted_mode->crtc_vtotal; | ||
578 | vsync_start = adjusted_mode->crtc_vsync_start; | ||
579 | vsync_end = adjusted_mode->crtc_vsync_end; | ||
580 | vsync_shift = hsync_start - htotal / 2; | ||
581 | |||
582 | if (intel_dsi->dual_link) { | ||
583 | hactive /= 2; | ||
584 | if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) | ||
585 | hactive += intel_dsi->pixel_overlap; | ||
586 | htotal /= 2; | ||
587 | } | ||
588 | |||
589 | /* minimum hactive as per bspec: 256 pixels */ | ||
590 | if (adjusted_mode->crtc_hdisplay < 256) | ||
591 | DRM_ERROR("hactive is less then 256 pixels\n"); | ||
592 | |||
593 | /* if RGB666 format, then hactive must be multiple of 4 pixels */ | ||
594 | if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0) | ||
595 | DRM_ERROR("hactive pixels are not multiple of 4\n"); | ||
596 | |||
597 | /* program TRANS_HTOTAL register */ | ||
598 | for_each_dsi_port(port, intel_dsi->ports) { | ||
599 | dsi_trans = dsi_port_to_transcoder(port); | ||
600 | I915_WRITE(HTOTAL(dsi_trans), | ||
601 | (hactive - 1) | ((htotal - 1) << 16)); | ||
602 | } | ||
603 | |||
604 | /* TRANS_HSYNC register to be programmed only for video mode */ | ||
605 | if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) { | ||
606 | if (intel_dsi->video_mode_format == | ||
607 | VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) { | ||
608 | /* BSPEC: hsync size should be at least 16 pixels */ | ||
609 | if (hsync_size < 16) | ||
610 | DRM_ERROR("hsync size < 16 pixels\n"); | ||
611 | } | ||
612 | |||
613 | if (hback_porch < 16) | ||
614 | DRM_ERROR("hback porch < 16 pixels\n"); | ||
615 | |||
616 | if (intel_dsi->dual_link) { | ||
617 | hsync_start /= 2; | ||
618 | hsync_end /= 2; | ||
619 | } | ||
620 | |||
621 | for_each_dsi_port(port, intel_dsi->ports) { | ||
622 | dsi_trans = dsi_port_to_transcoder(port); | ||
623 | I915_WRITE(HSYNC(dsi_trans), | ||
624 | (hsync_start - 1) | ((hsync_end - 1) << 16)); | ||
625 | } | ||
626 | } | ||
627 | |||
628 | /* program TRANS_VTOTAL register */ | ||
629 | for_each_dsi_port(port, intel_dsi->ports) { | ||
630 | dsi_trans = dsi_port_to_transcoder(port); | ||
631 | /* | ||
632 | * FIXME: Programming this assuming progressive mode, since | ||
633 | * interlace info from the VBT is not saved inside | ||
634 | * struct drm_display_mode. | ||
635 | * For interlaced mode: program the required pixels minus 2. | ||
636 | */ | ||
637 | I915_WRITE(VTOTAL(dsi_trans), | ||
638 | (vactive - 1) | ((vtotal - 1) << 16)); | ||
639 | } | ||
640 | |||
641 | if (vsync_end < vsync_start || vsync_end > vtotal) | ||
642 | DRM_ERROR("Invalid vsync_end value\n"); | ||
643 | |||
644 | if (vsync_start < vactive) | ||
645 | DRM_ERROR("vsync_start less than vactive\n"); | ||
646 | |||
647 | /* program TRANS_VSYNC register */ | ||
648 | for_each_dsi_port(port, intel_dsi->ports) { | ||
649 | dsi_trans = dsi_port_to_transcoder(port); | ||
650 | I915_WRITE(VSYNC(dsi_trans), | ||
651 | (vsync_start - 1) | ((vsync_end - 1) << 16)); | ||
652 | } | ||
653 | |||
654 | /* | ||
655 | * FIXME: It has to be programmed only for interlaced | ||
656 | * modes. Put the check condition here once interlaced | ||
657 | * info is available, as described above. | ||
658 | * Program the TRANS_VSYNCSHIFT register. | ||
659 | */ | ||
660 | for_each_dsi_port(port, intel_dsi->ports) { | ||
661 | dsi_trans = dsi_port_to_transcoder(port); | ||
662 | I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift); | ||
663 | } | ||
664 | } | ||
665 | |||
666 | static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) | ||
667 | { | ||
668 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
669 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
670 | enum port port; | ||
671 | enum transcoder dsi_trans; | ||
672 | u32 tmp; | ||
673 | |||
674 | for_each_dsi_port(port, intel_dsi->ports) { | ||
675 | dsi_trans = dsi_port_to_transcoder(port); | ||
676 | tmp = I915_READ(PIPECONF(dsi_trans)); | ||
677 | tmp |= PIPECONF_ENABLE; | ||
678 | I915_WRITE(PIPECONF(dsi_trans), tmp); | ||
679 | |||
680 | /* wait for transcoder to be enabled */ | ||
681 | if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans), | ||
682 | I965_PIPECONF_ACTIVE, | ||
683 | I965_PIPECONF_ACTIVE, 10)) | ||
684 | DRM_ERROR("DSI transcoder not enabled\n"); | ||
685 | } | ||
686 | } | ||
687 | |||
688 | static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder) | ||
689 | { | ||
690 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
691 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
692 | enum port port; | ||
693 | enum transcoder dsi_trans; | ||
694 | u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul; | ||
695 | |||
696 | /* | ||
697 | * escape clock count calculation: | ||
698 | * BYTE_CLK_COUNT = TIME_NS/(8 * UI) | ||
699 | * UI (nsec) = (10^6)/Bitrate | ||
700 | * TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6) / Bitrate | ||
701 | * ESCAPE_CLK_COUNT = TIME_NS/ESC_CLK_NS | ||
702 | */ | ||
703 | divisor = intel_dsi_tlpx_ns(intel_dsi) * intel_dsi_bitrate(intel_dsi) * 1000; | ||
704 | mul = 8 * 1000000; | ||
705 | hs_tx_timeout = DIV_ROUND_UP(intel_dsi->hs_tx_timeout * mul, | ||
706 | divisor); | ||
707 | lp_rx_timeout = DIV_ROUND_UP(intel_dsi->lp_rx_timeout * mul, divisor); | ||
708 | ta_timeout = DIV_ROUND_UP(intel_dsi->turn_arnd_val * mul, divisor); | ||
709 | |||
710 | for_each_dsi_port(port, intel_dsi->ports) { | ||
711 | dsi_trans = dsi_port_to_transcoder(port); | ||
712 | |||
713 | /* program hst_tx_timeout */ | ||
714 | tmp = I915_READ(DSI_HSTX_TO(dsi_trans)); | ||
715 | tmp &= ~HSTX_TIMEOUT_VALUE_MASK; | ||
716 | tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout); | ||
717 | I915_WRITE(DSI_HSTX_TO(dsi_trans), tmp); | ||
718 | |||
719 | /* FIXME: DSI_CALIB_TO */ | ||
720 | |||
721 | /* program lp_rx_host timeout */ | ||
722 | tmp = I915_READ(DSI_LPRX_HOST_TO(dsi_trans)); | ||
723 | tmp &= ~LPRX_TIMEOUT_VALUE_MASK; | ||
724 | tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout); | ||
725 | I915_WRITE(DSI_LPRX_HOST_TO(dsi_trans), tmp); | ||
726 | |||
727 | /* FIXME: DSI_PWAIT_TO */ | ||
728 | |||
729 | /* program turn around timeout */ | ||
730 | tmp = I915_READ(DSI_TA_TO(dsi_trans)); | ||
731 | tmp &= ~TA_TIMEOUT_VALUE_MASK; | ||
732 | tmp |= TA_TIMEOUT_VALUE(ta_timeout); | ||
733 | I915_WRITE(DSI_TA_TO(dsi_trans), tmp); | ||
734 | } | ||
735 | } | ||
736 | |||
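The divisor/mul pair is a direct transcription of the formula in the comment above; factored out as a helper it would read (a sketch mirroring the driver's own operand order and 32-bit types, so it assumes the products fit in a u32, just as the code does):

	/* Convert a timeout counted in byte clocks to escape clocks. */
	static u32 byte_clks_to_esc_clks(u32 byte_clks, u32 tlpx_ns,
					 u32 bitrate)
	{
		u32 mul = 8 * 1000000;			/* 8 * 10^6 */
		u32 divisor = tlpx_ns * bitrate * 1000;

		return DIV_ROUND_UP(byte_clks * mul, divisor);
	}

with hs_tx_timeout, lp_rx_timeout and ta_timeout each produced by one such call.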
737 | static void | ||
738 | gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, | ||
739 | const struct intel_crtc_state *pipe_config) | ||
109 | { | 740 | { |
110 | /* step 4a: power up all lanes of the DDI used by DSI */ | 741 | /* step 4a: power up all lanes of the DDI used by DSI */ |
111 | gen11_dsi_power_up_lanes(encoder); | 742 | gen11_dsi_power_up_lanes(encoder); |
743 | |||
744 | /* step 4b: configure lane sequencing of the Combo-PHY transmitters */ | ||
745 | gen11_dsi_config_phy_lanes_sequence(encoder); | ||
746 | |||
747 | /* step 4c: configure voltage swing and skew */ | ||
748 | gen11_dsi_voltage_swing_program_seq(encoder); | ||
749 | |||
750 | /* enable DDI buffer */ | ||
751 | gen11_dsi_enable_ddi_buffer(encoder); | ||
752 | |||
753 | /* setup D-PHY timings */ | ||
754 | gen11_dsi_setup_dphy_timings(encoder); | ||
755 | |||
756 | /* step 4h: setup DSI protocol timeouts */ | ||
757 | gen11_dsi_setup_timeouts(encoder); | ||
758 | |||
759 | /* Step (4h, 4i, 4j, 4k): Configure transcoder */ | ||
760 | gen11_dsi_configure_transcoder(encoder, pipe_config); | ||
761 | } | ||
762 | |||
763 | static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) | ||
764 | { | ||
765 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
766 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
767 | struct mipi_dsi_device *dsi; | ||
768 | enum port port; | ||
769 | enum transcoder dsi_trans; | ||
770 | u32 tmp; | ||
771 | int ret; | ||
772 | |||
773 | /* set maximum return packet size */ | ||
774 | for_each_dsi_port(port, intel_dsi->ports) { | ||
775 | dsi_trans = dsi_port_to_transcoder(port); | ||
776 | |||
777 | /* | ||
778 | * FIXME: This uses the number of DW's currently in the payload | ||
779 | * receive queue. This is probably not what we want here. | ||
780 | */ | ||
781 | tmp = I915_READ(DSI_CMD_RXCTL(dsi_trans)); | ||
782 | tmp &= NUMBER_RX_PLOAD_DW_MASK; | ||
783 | /* multiply "Number Rx Payload DW" by 4 to get max value */ | ||
784 | tmp = tmp * 4; | ||
785 | dsi = intel_dsi->dsi_hosts[port]->device; | ||
786 | ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp); | ||
787 | if (ret < 0) | ||
788 | DRM_ERROR("error setting max return pkt size %d\n", tmp); | ||
789 | } | ||
790 | |||
791 | /* panel power on related mipi dsi vbt sequences */ | ||
792 | intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON); | ||
793 | intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay); | ||
794 | intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET); | ||
795 | intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP); | ||
796 | intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON); | ||
797 | |||
798 | /* ensure all panel commands dispatched before enabling transcoder */ | ||
799 | wait_for_cmds_dispatched_to_panel(encoder); | ||
112 | } | 800 | } |
113 | 801 | ||
114 | static void __attribute__((unused)) | 802 | static void __attribute__((unused)) |
@@ -116,6 +804,8 @@ gen11_dsi_pre_enable(struct intel_encoder *encoder, | |||
116 | const struct intel_crtc_state *pipe_config, | 804 | const struct intel_crtc_state *pipe_config, |
117 | const struct drm_connector_state *conn_state) | 805 | const struct drm_connector_state *conn_state) |
118 | { | 806 | { |
807 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
808 | |||
119 | /* step2: enable IO power */ | 809 | /* step2: enable IO power */ |
120 | gen11_dsi_enable_io_power(encoder); | 810 | gen11_dsi_enable_io_power(encoder); |
121 | 811 | ||
@@ -123,5 +813,169 @@ gen11_dsi_pre_enable(struct intel_encoder *encoder, | |||
123 | gen11_dsi_program_esc_clk_div(encoder); | 813 | gen11_dsi_program_esc_clk_div(encoder); |
124 | 814 | ||
125 | /* step4: enable DSI port and DPHY */ | 815 | /* step4: enable DSI port and DPHY */ |
126 | gen11_dsi_enable_port_and_phy(encoder); | 816 | gen11_dsi_enable_port_and_phy(encoder, pipe_config); |
817 | |||
818 | /* step5: program and powerup panel */ | ||
819 | gen11_dsi_powerup_panel(encoder); | ||
820 | |||
821 | /* step6c: configure transcoder timings */ | ||
822 | gen11_dsi_set_transcoder_timings(encoder, pipe_config); | ||
823 | |||
824 | /* step6d: enable dsi transcoder */ | ||
825 | gen11_dsi_enable_transcoder(encoder); | ||
826 | |||
827 | /* step7: enable backlight */ | ||
828 | intel_panel_enable_backlight(pipe_config, conn_state); | ||
829 | intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON); | ||
830 | } | ||
831 | |||
832 | static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) | ||
833 | { | ||
834 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
835 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
836 | enum port port; | ||
837 | enum transcoder dsi_trans; | ||
838 | u32 tmp; | ||
839 | |||
840 | for_each_dsi_port(port, intel_dsi->ports) { | ||
841 | dsi_trans = dsi_port_to_transcoder(port); | ||
842 | |||
843 | /* disable transcoder */ | ||
844 | tmp = I915_READ(PIPECONF(dsi_trans)); | ||
845 | tmp &= ~PIPECONF_ENABLE; | ||
846 | I915_WRITE(PIPECONF(dsi_trans), tmp); | ||
847 | |||
848 | /* wait for transcoder to be disabled */ | ||
849 | if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans), | ||
850 | I965_PIPECONF_ACTIVE, 0, 50)) | ||
851 | DRM_ERROR("DSI transcoder not disabled\n"); | ||
852 | } | ||
853 | } | ||
854 | |||
855 | static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder) | ||
856 | { | ||
857 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
858 | |||
859 | intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF); | ||
860 | intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET); | ||
861 | intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF); | ||
862 | |||
863 | /* ensure cmds dispatched to panel */ | ||
864 | wait_for_cmds_dispatched_to_panel(encoder); | ||
865 | } | ||
866 | |||
867 | static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) | ||
868 | { | ||
869 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
870 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
871 | enum port port; | ||
872 | enum transcoder dsi_trans; | ||
873 | u32 tmp; | ||
874 | |||
875 | /* put dsi link in ULPS */ | ||
876 | for_each_dsi_port(port, intel_dsi->ports) { | ||
877 | dsi_trans = dsi_port_to_transcoder(port); | ||
878 | tmp = I915_READ(DSI_LP_MSG(dsi_trans)); | ||
879 | tmp |= LINK_ENTER_ULPS; | ||
880 | tmp &= ~LINK_ULPS_TYPE_LP11; | ||
881 | I915_WRITE(DSI_LP_MSG(dsi_trans), tmp); | ||
882 | |||
883 | if (wait_for_us((I915_READ(DSI_LP_MSG(dsi_trans)) & | ||
884 | LINK_IN_ULPS), | ||
885 | 10)) | ||
886 | DRM_ERROR("DSI link not in ULPS\n"); | ||
887 | } | ||
888 | |||
889 | /* disable ddi function */ | ||
890 | for_each_dsi_port(port, intel_dsi->ports) { | ||
891 | dsi_trans = dsi_port_to_transcoder(port); | ||
892 | tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans)); | ||
893 | tmp &= ~TRANS_DDI_FUNC_ENABLE; | ||
894 | I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp); | ||
895 | } | ||
896 | |||
897 | /* disable port sync mode if dual link */ | ||
898 | if (intel_dsi->dual_link) { | ||
899 | for_each_dsi_port(port, intel_dsi->ports) { | ||
900 | dsi_trans = dsi_port_to_transcoder(port); | ||
901 | tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans)); | ||
902 | tmp &= ~PORT_SYNC_MODE_ENABLE; | ||
903 | I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp); | ||
904 | } | ||
905 | } | ||
906 | } | ||
907 | |||
908 | static void gen11_dsi_disable_port(struct intel_encoder *encoder) | ||
909 | { | ||
910 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
911 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
912 | u32 tmp; | ||
913 | enum port port; | ||
914 | |||
915 | for_each_dsi_port(port, intel_dsi->ports) { | ||
916 | tmp = I915_READ(DDI_BUF_CTL(port)); | ||
917 | tmp &= ~DDI_BUF_CTL_ENABLE; | ||
918 | I915_WRITE(DDI_BUF_CTL(port), tmp); | ||
919 | |||
920 | if (wait_for_us((I915_READ(DDI_BUF_CTL(port)) & | ||
921 | DDI_BUF_IS_IDLE), | ||
922 | 8)) | ||
923 | DRM_ERROR("DDI port:%c buffer not idle\n", | ||
924 | port_name(port)); | ||
925 | } | ||
926 | } | ||
927 | |||
928 | static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) | ||
929 | { | ||
930 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
931 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
932 | enum port port; | ||
933 | u32 tmp; | ||
934 | |||
935 | intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_A_IO); | ||
936 | |||
937 | if (intel_dsi->dual_link) | ||
938 | intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_B_IO); | ||
939 | |||
940 | /* set mode to DDI */ | ||
941 | for_each_dsi_port(port, intel_dsi->ports) { | ||
942 | tmp = I915_READ(ICL_DSI_IO_MODECTL(port)); | ||
943 | tmp &= ~COMBO_PHY_MODE_DSI; | ||
944 | I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp); | ||
945 | } | ||
946 | } | ||
947 | |||
948 | static void __attribute__((unused)) gen11_dsi_disable( | ||
949 | struct intel_encoder *encoder, | ||
950 | const struct intel_crtc_state *old_crtc_state, | ||
951 | const struct drm_connector_state *old_conn_state) | ||
952 | { | ||
953 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); | ||
954 | |||
955 | /* step1: turn off backlight */ | ||
956 | intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF); | ||
957 | intel_panel_disable_backlight(old_conn_state); | ||
958 | |||
959 | /* step2d,e: disable transcoder and wait */ | ||
960 | gen11_dsi_disable_transcoder(encoder); | ||
961 | |||
962 | /* step2f,g: powerdown panel */ | ||
963 | gen11_dsi_powerdown_panel(encoder); | ||
964 | |||
965 | /* step2h,i,j: deconfigure transcoder */ | ||
966 | gen11_dsi_deconfigure_trancoder(encoder); | ||
967 | |||
968 | /* step3: disable port */ | ||
969 | gen11_dsi_disable_port(encoder); | ||
970 | |||
971 | /* step4: disable IO power */ | ||
972 | gen11_dsi_disable_io_power(encoder); | ||
973 | } | ||
974 | |||
975 | void icl_dsi_init(struct drm_i915_private *dev_priv) | ||
976 | { | ||
977 | enum port port; | ||
978 | |||
979 | if (!intel_bios_is_dsi_present(dev_priv, &port)) | ||
980 | return; | ||
127 | } | 981 | } |
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index b04952bacf77..a5a2c8fe58a7 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c | |||
@@ -203,6 +203,72 @@ intel_crtc_destroy_state(struct drm_crtc *crtc, | |||
203 | drm_atomic_helper_crtc_destroy_state(crtc, state); | 203 | drm_atomic_helper_crtc_destroy_state(crtc, state); |
204 | } | 204 | } |
205 | 205 | ||
206 | static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state, | ||
207 | int num_scalers_need, struct intel_crtc *intel_crtc, | ||
208 | const char *name, int idx, | ||
209 | struct intel_plane_state *plane_state, | ||
210 | int *scaler_id) | ||
211 | { | ||
212 | struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); | ||
213 | int j; | ||
214 | u32 mode; | ||
215 | |||
216 | if (*scaler_id < 0) { | ||
217 | /* find a free scaler */ | ||
218 | for (j = 0; j < intel_crtc->num_scalers; j++) { | ||
219 | if (scaler_state->scalers[j].in_use) | ||
220 | continue; | ||
221 | |||
222 | *scaler_id = j; | ||
223 | scaler_state->scalers[*scaler_id].in_use = 1; | ||
224 | break; | ||
225 | } | ||
226 | } | ||
227 | |||
228 | if (WARN(*scaler_id < 0, "Cannot find scaler for %s:%d\n", name, idx)) | ||
229 | return; | ||
230 | |||
231 | /* set scaler mode */ | ||
232 | if (plane_state && plane_state->base.fb && | ||
233 | plane_state->base.fb->format->is_yuv && | ||
234 | plane_state->base.fb->format->num_planes > 1) { | ||
235 | if (IS_GEN9(dev_priv) && | ||
236 | !IS_GEMINILAKE(dev_priv)) { | ||
237 | mode = SKL_PS_SCALER_MODE_NV12; | ||
238 | } else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) { | ||
239 | /* | ||
240 | * On gen11+'s HDR planes we only use the scaler for | ||
241 | * scaling. They have a dedicated chroma upsampler, so | ||
242 | * we don't need the scaler to upsample the UV plane. | ||
243 | */ | ||
244 | mode = PS_SCALER_MODE_NORMAL; | ||
245 | } else { | ||
246 | mode = PS_SCALER_MODE_PLANAR; | ||
247 | |||
248 | if (plane_state->linked_plane) | ||
249 | mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id); | ||
250 | } | ||
251 | } else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) { | ||
252 | mode = PS_SCALER_MODE_NORMAL; | ||
253 | } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) { | ||
254 | /* | ||
255 | * when only 1 scaler is in use on a pipe with 2 scalers, | ||
256 | * scaler 0 operates in high quality (HQ) mode. | ||
257 | * In this case use scaler 0 to take advantage of HQ mode | ||
258 | */ | ||
259 | scaler_state->scalers[*scaler_id].in_use = 0; | ||
260 | *scaler_id = 0; | ||
261 | scaler_state->scalers[0].in_use = 1; | ||
262 | mode = SKL_PS_SCALER_MODE_HQ; | ||
263 | } else { | ||
264 | mode = SKL_PS_SCALER_MODE_DYN; | ||
265 | } | ||
266 | |||
267 | DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n", | ||
268 | intel_crtc->pipe, *scaler_id, name, idx); | ||
269 | scaler_state->scalers[*scaler_id].mode = mode; | ||
270 | } | ||
271 | |||
206 | /** | 272 | /** |
207 | * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests | 273 | * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests |
208 | * @dev_priv: i915 device | 274 | * @dev_priv: i915 device |
@@ -232,7 +298,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, | |||
232 | struct drm_atomic_state *drm_state = crtc_state->base.state; | 298 | struct drm_atomic_state *drm_state = crtc_state->base.state; |
233 | struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state); | 299 | struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state); |
234 | int num_scalers_need; | 300 | int num_scalers_need; |
235 | int i, j; | 301 | int i; |
236 | 302 | ||
237 | num_scalers_need = hweight32(scaler_state->scaler_users); | 303 | num_scalers_need = hweight32(scaler_state->scaler_users); |
238 | 304 | ||
@@ -304,59 +370,17 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, | |||
304 | idx = plane->base.id; | 370 | idx = plane->base.id; |
305 | 371 | ||
306 | /* plane on different crtc cannot be a scaler user of this crtc */ | 372 | /* plane on different crtc cannot be a scaler user of this crtc */ |
307 | if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) { | 373 | if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) |
308 | continue; | 374 | continue; |
309 | } | ||
310 | 375 | ||
311 | plane_state = intel_atomic_get_new_plane_state(intel_state, | 376 | plane_state = intel_atomic_get_new_plane_state(intel_state, |
312 | intel_plane); | 377 | intel_plane); |
313 | scaler_id = &plane_state->scaler_id; | 378 | scaler_id = &plane_state->scaler_id; |
314 | } | 379 | } |
315 | 380 | ||
316 | if (*scaler_id < 0) { | 381 | intel_atomic_setup_scaler(scaler_state, num_scalers_need, |
317 | /* find a free scaler */ | 382 | intel_crtc, name, idx, |
318 | for (j = 0; j < intel_crtc->num_scalers; j++) { | 383 | plane_state, scaler_id); |
319 | if (!scaler_state->scalers[j].in_use) { | ||
320 | scaler_state->scalers[j].in_use = 1; | ||
321 | *scaler_id = j; | ||
322 | DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n", | ||
323 | intel_crtc->pipe, *scaler_id, name, idx); | ||
324 | break; | ||
325 | } | ||
326 | } | ||
327 | } | ||
328 | |||
329 | if (WARN_ON(*scaler_id < 0)) { | ||
330 | DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx); | ||
331 | continue; | ||
332 | } | ||
333 | |||
334 | /* set scaler mode */ | ||
335 | if ((INTEL_GEN(dev_priv) >= 9) && | ||
336 | plane_state && plane_state->base.fb && | ||
337 | plane_state->base.fb->format->format == | ||
338 | DRM_FORMAT_NV12) { | ||
339 | if (INTEL_GEN(dev_priv) == 9 && | ||
340 | !IS_GEMINILAKE(dev_priv) && | ||
341 | !IS_SKYLAKE(dev_priv)) | ||
342 | scaler_state->scalers[*scaler_id].mode = | ||
343 | SKL_PS_SCALER_MODE_NV12; | ||
344 | else | ||
345 | scaler_state->scalers[*scaler_id].mode = | ||
346 | PS_SCALER_MODE_PLANAR; | ||
347 | } else if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) { | ||
348 | /* | ||
349 | * when only 1 scaler is in use on either pipe A or B, | ||
350 | * scaler 0 operates in high quality (HQ) mode. | ||
351 | * In this case use scaler 0 to take advantage of HQ mode | ||
352 | */ | ||
353 | *scaler_id = 0; | ||
354 | scaler_state->scalers[0].in_use = 1; | ||
355 | scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ; | ||
356 | scaler_state->scalers[1].in_use = 0; | ||
357 | } else { | ||
358 | scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN; | ||
359 | } | ||
360 | } | 384 | } |
361 | 385 | ||
362 | return 0; | 386 | return 0; |
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index aabebe0d2e9b..905f8ef3ba4f 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c | |||
@@ -36,28 +36,31 @@ | |||
36 | #include <drm/drm_plane_helper.h> | 36 | #include <drm/drm_plane_helper.h> |
37 | #include "intel_drv.h" | 37 | #include "intel_drv.h" |
38 | 38 | ||
39 | /** | 39 | struct intel_plane *intel_plane_alloc(void) |
40 | * intel_create_plane_state - create plane state object | ||
41 | * @plane: drm plane | ||
42 | * | ||
43 | * Allocates a fresh plane state for the given plane and sets some of | ||
44 | * the state values to sensible initial values. | ||
45 | * | ||
46 | * Returns: A newly allocated plane state, or NULL on failure | ||
47 | */ | ||
48 | struct intel_plane_state * | ||
49 | intel_create_plane_state(struct drm_plane *plane) | ||
50 | { | 40 | { |
51 | struct intel_plane_state *state; | 41 | struct intel_plane_state *plane_state; |
42 | struct intel_plane *plane; | ||
52 | 43 | ||
53 | state = kzalloc(sizeof(*state), GFP_KERNEL); | 44 | plane = kzalloc(sizeof(*plane), GFP_KERNEL); |
54 | if (!state) | 45 | if (!plane) |
55 | return NULL; | 46 | return ERR_PTR(-ENOMEM); |
56 | 47 | ||
57 | state->base.plane = plane; | 48 | plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL); |
58 | state->base.rotation = DRM_MODE_ROTATE_0; | 49 | if (!plane_state) { |
50 | kfree(plane); | ||
51 | return ERR_PTR(-ENOMEM); | ||
52 | } | ||
59 | 53 | ||
60 | return state; | 54 | __drm_atomic_helper_plane_reset(&plane->base, &plane_state->base); |
55 | plane_state->scaler_id = -1; | ||
56 | |||
57 | return plane; | ||
58 | } | ||
59 | |||
60 | void intel_plane_free(struct intel_plane *plane) | ||
61 | { | ||
62 | intel_plane_destroy_state(&plane->base, plane->base.state); | ||
63 | kfree(plane); | ||
61 | } | 64 | } |
62 | 65 | ||
63 | /** | 66 | /** |
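With the state reset folded into intel_plane_alloc(), callers move from NULL checks to ERR_PTR handling; a hypothetical call site:

	struct intel_plane *plane;

	plane = intel_plane_alloc();
	if (IS_ERR(plane))
		return PTR_ERR(plane);	/* -ENOMEM from either allocation */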
@@ -117,10 +120,14 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ | |||
117 | struct intel_plane *intel_plane = to_intel_plane(plane); | 120 | struct intel_plane *intel_plane = to_intel_plane(plane); |
118 | int ret; | 121 | int ret; |
119 | 122 | ||
123 | crtc_state->active_planes &= ~BIT(intel_plane->id); | ||
124 | crtc_state->nv12_planes &= ~BIT(intel_plane->id); | ||
125 | intel_state->base.visible = false; | ||
126 | |||
127 | /* If the plane is not attached to a crtc, no further checks are needed. */ | ||
120 | if (!intel_state->base.crtc && !old_plane_state->base.crtc) | 128 | if (!intel_state->base.crtc && !old_plane_state->base.crtc) |
121 | return 0; | 129 | return 0; |
122 | 130 | ||
123 | intel_state->base.visible = false; | ||
124 | ret = intel_plane->check_plane(crtc_state, intel_state); | 131 | ret = intel_plane->check_plane(crtc_state, intel_state); |
125 | if (ret) | 132 | if (ret) |
126 | return ret; | 133 | return ret; |
@@ -128,13 +135,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ | |||
128 | /* FIXME pre-g4x don't work like this */ | 135 | /* FIXME pre-g4x don't work like this */ |
129 | if (state->visible) | 136 | if (state->visible) |
130 | crtc_state->active_planes |= BIT(intel_plane->id); | 137 | crtc_state->active_planes |= BIT(intel_plane->id); |
131 | else | ||
132 | crtc_state->active_planes &= ~BIT(intel_plane->id); | ||
133 | 138 | ||
134 | if (state->visible && state->fb->format->format == DRM_FORMAT_NV12) | 139 | if (state->visible && state->fb->format->format == DRM_FORMAT_NV12) |
135 | crtc_state->nv12_planes |= BIT(intel_plane->id); | 140 | crtc_state->nv12_planes |= BIT(intel_plane->id); |
136 | else | ||
137 | crtc_state->nv12_planes &= ~BIT(intel_plane->id); | ||
138 | 141 | ||
139 | return intel_plane_atomic_calc_changes(old_crtc_state, | 142 | return intel_plane_atomic_calc_changes(old_crtc_state, |
140 | &crtc_state->base, | 143 | &crtc_state->base, |
@@ -152,6 +155,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane, | |||
152 | const struct drm_crtc_state *old_crtc_state; | 155 | const struct drm_crtc_state *old_crtc_state; |
153 | struct drm_crtc_state *new_crtc_state; | 156 | struct drm_crtc_state *new_crtc_state; |
154 | 157 | ||
158 | new_plane_state->visible = false; | ||
155 | if (!crtc) | 159 | if (!crtc) |
156 | return 0; | 160 | return 0; |
157 | 161 | ||
@@ -164,29 +168,52 @@ static int intel_plane_atomic_check(struct drm_plane *plane, | |||
164 | to_intel_plane_state(new_plane_state)); | 168 | to_intel_plane_state(new_plane_state)); |
165 | } | 169 | } |
166 | 170 | ||
167 | static void intel_plane_atomic_update(struct drm_plane *plane, | 171 | void intel_update_planes_on_crtc(struct intel_atomic_state *old_state, |
168 | struct drm_plane_state *old_state) | 172 | struct intel_crtc *crtc, |
173 | struct intel_crtc_state *old_crtc_state, | ||
174 | struct intel_crtc_state *new_crtc_state) | ||
169 | { | 175 | { |
170 | struct intel_atomic_state *state = to_intel_atomic_state(old_state->state); | 176 | struct intel_plane_state *new_plane_state; |
171 | struct intel_plane *intel_plane = to_intel_plane(plane); | 177 | struct intel_plane *plane; |
172 | const struct intel_plane_state *new_plane_state = | 178 | u32 update_mask; |
173 | intel_atomic_get_new_plane_state(state, intel_plane); | 179 | int i; |
174 | struct drm_crtc *crtc = new_plane_state->base.crtc ?: old_state->crtc; | 180 | |
175 | 181 | update_mask = old_crtc_state->active_planes; | |
176 | if (new_plane_state->base.visible) { | 182 | update_mask |= new_crtc_state->active_planes; |
177 | const struct intel_crtc_state *new_crtc_state = | 183 | |
178 | intel_atomic_get_new_crtc_state(state, to_intel_crtc(crtc)); | 184 | for_each_new_intel_plane_in_state(old_state, plane, new_plane_state, i) { |
179 | 185 | if (crtc->pipe != plane->pipe || | |
180 | trace_intel_update_plane(plane, | 186 | !(update_mask & BIT(plane->id))) |
181 | to_intel_crtc(crtc)); | 187 | continue; |
182 | 188 | ||
183 | intel_plane->update_plane(intel_plane, | 189 | if (new_plane_state->base.visible) { |
184 | new_crtc_state, new_plane_state); | 190 | trace_intel_update_plane(&plane->base, crtc); |
185 | } else { | 191 | |
186 | trace_intel_disable_plane(plane, | 192 | plane->update_plane(plane, new_crtc_state, new_plane_state); |
187 | to_intel_crtc(crtc)); | 193 | } else if (new_plane_state->slave) { |
188 | 194 | struct intel_plane *master = | |
189 | intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc)); | 195 | new_plane_state->linked_plane; |
196 | |||
197 | /* | ||
198 | * We update the slave plane from this function because | ||
199 | * programming it from the master plane's update_plane | ||
200 | * callback runs into issues when the Y plane is | ||
201 | * reassigned, disabled or used by a different plane. | ||
202 | * | ||
203 | * The slave plane is updated with the master plane's | ||
204 | * plane_state. | ||
205 | */ | ||
206 | new_plane_state = | ||
207 | intel_atomic_get_new_plane_state(old_state, master); | ||
208 | |||
209 | trace_intel_update_plane(&plane->base, crtc); | ||
210 | |||
211 | plane->update_slave(plane, new_crtc_state, new_plane_state); | ||
212 | } else { | ||
213 | trace_intel_disable_plane(&plane->base, crtc); | ||
214 | |||
215 | plane->disable_plane(plane, crtc); | ||
216 | } | ||
190 | } | 217 | } |
191 | } | 218 | } |
192 | 219 | ||
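
intel_update_planes_on_crtc() walks the union of the old and new active_planes masks, so a plane that just went inactive still gets its disable_plane() call. A minimal runnable reduction of that masking pattern (eight planes assumed):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    static void walk_update_mask(uint8_t old_active, uint8_t new_active)
    {
        uint8_t update_mask = old_active | new_active;
        int id;

        /* Planes in either mask need an update or an explicit disable. */
        for (id = 0; id < 8; id++) {
            if (!(update_mask & BIT(id)))
                continue;

            if (new_active & BIT(id))
                printf("plane %d: update\n", id);
            else
                printf("plane %d: disable\n", id);
        }
    }

    int main(void)
    {
        walk_update_mask(0x3, 0x5); /* plane 1 goes away, plane 2 appears */
        return 0;
    }
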
@@ -194,7 +221,6 @@ const struct drm_plane_helper_funcs intel_plane_helper_funcs = { | |||
194 | .prepare_fb = intel_prepare_plane_fb, | 221 | .prepare_fb = intel_prepare_plane_fb, |
195 | .cleanup_fb = intel_cleanup_plane_fb, | 222 | .cleanup_fb = intel_cleanup_plane_fb, |
196 | .atomic_check = intel_plane_atomic_check, | 223 | .atomic_check = intel_plane_atomic_check, |
197 | .atomic_update = intel_plane_atomic_update, | ||
198 | }; | 224 | }; |
199 | 225 | ||
200 | /** | 226 | /** |
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 769f3f586661..ae55a6865d5c 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c | |||
@@ -144,26 +144,43 @@ static const struct { | |||
144 | /* HDMI N/CTS table */ | 144 | /* HDMI N/CTS table */ |
145 | #define TMDS_297M 297000 | 145 | #define TMDS_297M 297000 |
146 | #define TMDS_296M 296703 | 146 | #define TMDS_296M 296703 |
147 | #define TMDS_594M 594000 | ||
148 | #define TMDS_593M 593407 | ||
149 | |||
147 | static const struct { | 150 | static const struct { |
148 | int sample_rate; | 151 | int sample_rate; |
149 | int clock; | 152 | int clock; |
150 | int n; | 153 | int n; |
151 | int cts; | 154 | int cts; |
152 | } hdmi_aud_ncts[] = { | 155 | } hdmi_aud_ncts[] = { |
153 | { 44100, TMDS_296M, 4459, 234375 }, | ||
154 | { 44100, TMDS_297M, 4704, 247500 }, | ||
155 | { 48000, TMDS_296M, 5824, 281250 }, | ||
156 | { 48000, TMDS_297M, 5120, 247500 }, | ||
157 | { 32000, TMDS_296M, 5824, 421875 }, | 156 | { 32000, TMDS_296M, 5824, 421875 }, |
158 | { 32000, TMDS_297M, 3072, 222750 }, | 157 | { 32000, TMDS_297M, 3072, 222750 }, |
158 | { 32000, TMDS_593M, 5824, 843750 }, | ||
159 | { 32000, TMDS_594M, 3072, 445500 }, | ||
160 | { 44100, TMDS_296M, 4459, 234375 }, | ||
161 | { 44100, TMDS_297M, 4704, 247500 }, | ||
162 | { 44100, TMDS_593M, 8918, 937500 }, | ||
163 | { 44100, TMDS_594M, 9408, 990000 }, | ||
159 | { 88200, TMDS_296M, 8918, 234375 }, | 164 | { 88200, TMDS_296M, 8918, 234375 }, |
160 | { 88200, TMDS_297M, 9408, 247500 }, | 165 | { 88200, TMDS_297M, 9408, 247500 }, |
161 | { 96000, TMDS_296M, 11648, 281250 }, | 166 | { 88200, TMDS_593M, 17836, 937500 }, |
162 | { 96000, TMDS_297M, 10240, 247500 }, | 167 | { 88200, TMDS_594M, 18816, 990000 }, |
163 | { 176400, TMDS_296M, 17836, 234375 }, | 168 | { 176400, TMDS_296M, 17836, 234375 }, |
164 | { 176400, TMDS_297M, 18816, 247500 }, | 169 | { 176400, TMDS_297M, 18816, 247500 }, |
170 | { 176400, TMDS_593M, 35672, 937500 }, | ||
171 | { 176400, TMDS_594M, 37632, 990000 }, | ||
172 | { 48000, TMDS_296M, 5824, 281250 }, | ||
173 | { 48000, TMDS_297M, 5120, 247500 }, | ||
174 | { 48000, TMDS_593M, 5824, 562500 }, | ||
175 | { 48000, TMDS_594M, 6144, 594000 }, | ||
176 | { 96000, TMDS_296M, 11648, 281250 }, | ||
177 | { 96000, TMDS_297M, 10240, 247500 }, | ||
178 | { 96000, TMDS_593M, 11648, 562500 }, | ||
179 | { 96000, TMDS_594M, 12288, 594000 }, | ||
165 | { 192000, TMDS_296M, 23296, 281250 }, | 180 | { 192000, TMDS_296M, 23296, 281250 }, |
166 | { 192000, TMDS_297M, 20480, 247500 }, | 181 | { 192000, TMDS_297M, 20480, 247500 }, |
182 | { 192000, TMDS_593M, 23296, 562500 }, | ||
183 | { 192000, TMDS_594M, 24576, 594000 }, | ||
167 | }; | 184 | }; |
168 | 185 | ||
169 | /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ | 186 | /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ |
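
Every row of the extended table, including the new 593/594 MHz entries, satisfies the HDMI audio clock regeneration relation 128 * fs = f_TMDS * N / CTS; a runnable spot-check (table clocks are in kHz):

    #include <stdint.h>
    #include <stdio.h>

    struct ncts { int sample_rate, clock, n, cts; };

    static int ncts_entry_valid(const struct ncts *e)
    {
        /* N * f_TMDS must equal 128 * fs * CTS exactly */
        uint64_t lhs = (uint64_t)e->n * e->clock * 1000;
        uint64_t rhs = 128ull * e->sample_rate * e->cts;

        return lhs == rhs;
    }

    int main(void)
    {
        /* the new 48 kHz @ 594 MHz row from the table above */
        const struct ncts sample = { 48000, 594000, 6144, 594000 };

        printf("entry %s\n", ncts_entry_valid(&sample) ? "ok" : "BAD");
        return 0;
    }
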
@@ -912,6 +929,9 @@ static int i915_audio_component_bind(struct device *i915_kdev, | |||
912 | if (WARN_ON(acomp->base.ops || acomp->base.dev)) | 929 | if (WARN_ON(acomp->base.ops || acomp->base.dev)) |
913 | return -EEXIST; | 930 | return -EEXIST; |
914 | 931 | ||
932 | if (WARN_ON(!device_link_add(hda_kdev, i915_kdev, DL_FLAG_STATELESS))) | ||
933 | return -ENOMEM; | ||
934 | |||
915 | drm_modeset_lock_all(&dev_priv->drm); | 935 | drm_modeset_lock_all(&dev_priv->drm); |
916 | acomp->base.ops = &i915_audio_component_ops; | 936 | acomp->base.ops = &i915_audio_component_ops; |
917 | acomp->base.dev = i915_kdev; | 937 | acomp->base.dev = i915_kdev; |
@@ -935,6 +955,8 @@ static void i915_audio_component_unbind(struct device *i915_kdev, | |||
935 | acomp->base.dev = NULL; | 955 | acomp->base.dev = NULL; |
936 | dev_priv->audio_component = NULL; | 956 | dev_priv->audio_component = NULL; |
937 | drm_modeset_unlock_all(&dev_priv->drm); | 957 | drm_modeset_unlock_all(&dev_priv->drm); |
958 | |||
959 | device_link_remove(hda_kdev, i915_kdev); | ||
938 | } | 960 | } |
939 | 961 | ||
940 | static const struct component_ops i915_audio_component_bind_ops = { | 962 | static const struct component_ops i915_audio_component_bind_ops = { |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 1faa494e2bc9..0694aa8bb9bc 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -420,6 +420,13 @@ parse_general_features(struct drm_i915_private *dev_priv, | |||
420 | intel_bios_ssc_frequency(dev_priv, general->ssc_freq); | 420 | intel_bios_ssc_frequency(dev_priv, general->ssc_freq); |
421 | dev_priv->vbt.display_clock_mode = general->display_clock_mode; | 421 | dev_priv->vbt.display_clock_mode = general->display_clock_mode; |
422 | dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; | 422 | dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted; |
423 | if (bdb->version >= 181) { | ||
424 | dev_priv->vbt.orientation = general->rotate_180 ? | ||
425 | DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP : | ||
426 | DRM_MODE_PANEL_ORIENTATION_NORMAL; | ||
427 | } else { | ||
428 | dev_priv->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN; | ||
429 | } | ||
423 | DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", | 430 | DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n", |
424 | dev_priv->vbt.int_tv_support, | 431 | dev_priv->vbt.int_tv_support, |
425 | dev_priv->vbt.int_crt_support, | 432 | dev_priv->vbt.int_crt_support, |
@@ -852,6 +859,30 @@ parse_mipi_config(struct drm_i915_private *dev_priv, | |||
852 | 859 | ||
853 | parse_dsi_backlight_ports(dev_priv, bdb->version, port); | 860 | parse_dsi_backlight_ports(dev_priv, bdb->version, port); |
854 | 861 | ||
862 | /* FIXME is the 90 vs. 270 correct? */ | ||
863 | switch (config->rotation) { | ||
864 | case ENABLE_ROTATION_0: | ||
865 | /* | ||
866 | * Most (all?) VBTs claim 0 degrees despite having | ||
867 | * an upside-down panel, so we do not trust this. | ||
868 | */ | ||
869 | dev_priv->vbt.dsi.orientation = | ||
870 | DRM_MODE_PANEL_ORIENTATION_UNKNOWN; | ||
871 | break; | ||
872 | case ENABLE_ROTATION_90: | ||
873 | dev_priv->vbt.dsi.orientation = | ||
874 | DRM_MODE_PANEL_ORIENTATION_RIGHT_UP; | ||
875 | break; | ||
876 | case ENABLE_ROTATION_180: | ||
877 | dev_priv->vbt.dsi.orientation = | ||
878 | DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP; | ||
879 | break; | ||
880 | case ENABLE_ROTATION_270: | ||
881 | dev_priv->vbt.dsi.orientation = | ||
882 | DRM_MODE_PANEL_ORIENTATION_LEFT_UP; | ||
883 | break; | ||
884 | } | ||
885 | |||
855 | /* We have mandatory mipi config blocks. Initialize as generic panel */ | 886 | /* We have mandatory mipi config blocks. Initialize as generic panel */ |
856 | dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID; | 887 | dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID; |
857 | } | 888 | } |
@@ -2039,17 +2070,17 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, | |||
2039 | 2070 | ||
2040 | dvo_port = child->dvo_port; | 2071 | dvo_port = child->dvo_port; |
2041 | 2072 | ||
2042 | switch (dvo_port) { | 2073 | if (dvo_port == DVO_PORT_MIPIA || |
2043 | case DVO_PORT_MIPIA: | 2074 | (dvo_port == DVO_PORT_MIPIB && IS_ICELAKE(dev_priv)) || |
2044 | case DVO_PORT_MIPIC: | 2075 | (dvo_port == DVO_PORT_MIPIC && !IS_ICELAKE(dev_priv))) { |
2045 | if (port) | 2076 | if (port) |
2046 | *port = dvo_port - DVO_PORT_MIPIA; | 2077 | *port = dvo_port - DVO_PORT_MIPIA; |
2047 | return true; | 2078 | return true; |
2048 | case DVO_PORT_MIPIB: | 2079 | } else if (dvo_port == DVO_PORT_MIPIB || |
2049 | case DVO_PORT_MIPID: | 2080 | dvo_port == DVO_PORT_MIPIC || |
2081 | dvo_port == DVO_PORT_MIPID) { | ||
2050 | DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n", | 2082 | DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n", |
2051 | port_name(dvo_port - DVO_PORT_MIPIA)); | 2083 | port_name(dvo_port - DVO_PORT_MIPIA)); |
2052 | break; | ||
2053 | } | 2084 | } |
2054 | } | 2085 | } |
2055 | 2086 | ||
@@ -2159,3 +2190,49 @@ intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv, | |||
2159 | 2190 | ||
2160 | return false; | 2191 | return false; |
2161 | } | 2192 | } |
2193 | |||
2194 | enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, | ||
2195 | enum port port) | ||
2196 | { | ||
2197 | const struct ddi_vbt_port_info *info = | ||
2198 | &dev_priv->vbt.ddi_port_info[port]; | ||
2199 | enum aux_ch aux_ch; | ||
2200 | |||
2201 | if (!info->alternate_aux_channel) { | ||
2202 | aux_ch = (enum aux_ch)port; | ||
2203 | |||
2204 | DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n", | ||
2205 | aux_ch_name(aux_ch), port_name(port)); | ||
2206 | return aux_ch; | ||
2207 | } | ||
2208 | |||
2209 | switch (info->alternate_aux_channel) { | ||
2210 | case DP_AUX_A: | ||
2211 | aux_ch = AUX_CH_A; | ||
2212 | break; | ||
2213 | case DP_AUX_B: | ||
2214 | aux_ch = AUX_CH_B; | ||
2215 | break; | ||
2216 | case DP_AUX_C: | ||
2217 | aux_ch = AUX_CH_C; | ||
2218 | break; | ||
2219 | case DP_AUX_D: | ||
2220 | aux_ch = AUX_CH_D; | ||
2221 | break; | ||
2222 | case DP_AUX_E: | ||
2223 | aux_ch = AUX_CH_E; | ||
2224 | break; | ||
2225 | case DP_AUX_F: | ||
2226 | aux_ch = AUX_CH_F; | ||
2227 | break; | ||
2228 | default: | ||
2229 | MISSING_CASE(info->alternate_aux_channel); | ||
2230 | aux_ch = AUX_CH_A; | ||
2231 | break; | ||
2232 | } | ||
2233 | |||
2234 | DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n", | ||
2235 | aux_ch_name(aux_ch), port_name(port)); | ||
2236 | |||
2237 | return aux_ch; | ||
2238 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 29075c763428..25e3aba9cded 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c | |||
@@ -2138,16 +2138,8 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv, | |||
2138 | static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv, | 2138 | static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv, |
2139 | int pixel_rate) | 2139 | int pixel_rate) |
2140 | { | 2140 | { |
2141 | if (INTEL_GEN(dev_priv) >= 10) | 2141 | if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) |
2142 | return DIV_ROUND_UP(pixel_rate, 2); | 2142 | return DIV_ROUND_UP(pixel_rate, 2); |
2143 | else if (IS_GEMINILAKE(dev_priv)) | ||
2144 | /* | ||
2145 | * FIXME: Avoid using a pixel clock that is more than 99% of the cdclk | ||
2146 | * as a temporary workaround. Use a higher cdclk instead. (Note that | ||
2147 | * intel_compute_max_dotclk() limits the max pixel clock to 99% of max | ||
2148 | * cdclk.) | ||
2149 | */ | ||
2150 | return DIV_ROUND_UP(pixel_rate * 100, 2 * 99); | ||
2151 | else if (IS_GEN9(dev_priv) || | 2143 | else if (IS_GEN9(dev_priv) || |
2152 | IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) | 2144 | IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) |
2153 | return pixel_rate; | 2145 | return pixel_rate; |
@@ -2543,14 +2535,8 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) | |||
2543 | { | 2535 | { |
2544 | int max_cdclk_freq = dev_priv->max_cdclk_freq; | 2536 | int max_cdclk_freq = dev_priv->max_cdclk_freq; |
2545 | 2537 | ||
2546 | if (INTEL_GEN(dev_priv) >= 10) | 2538 | if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) |
2547 | return 2 * max_cdclk_freq; | 2539 | return 2 * max_cdclk_freq; |
2548 | else if (IS_GEMINILAKE(dev_priv)) | ||
2549 | /* | ||
2550 | * FIXME: Limiting to 99% as a temporary workaround. See | ||
2551 | * intel_min_cdclk() for details. | ||
2552 | */ | ||
2553 | return 2 * max_cdclk_freq * 99 / 100; | ||
2554 | else if (IS_GEN9(dev_priv) || | 2540 | else if (IS_GEN9(dev_priv) || |
2555 | IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) | 2541 | IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) |
2556 | return max_cdclk_freq; | 2542 | return max_cdclk_freq; |
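
With the Geminilake 99% fudge dropped, intel_pixel_rate_to_cdclk() and intel_compute_max_dotclk() become exact inverses under the 2:1 ratio; a runnable spot-check (the cdclk figure is a stand-in, not necessarily GLK's real limit):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int max_cdclk = 316800;         /* stand-in kHz value */
        int max_dotclk = 2 * max_cdclk; /* intel_compute_max_dotclk() */

        /* cdclk demanded by the max dotclk must not exceed max cdclk */
        printf("%d <= %d : %s\n", DIV_ROUND_UP(max_dotclk, 2), max_cdclk,
               DIV_ROUND_UP(max_dotclk, 2) <= max_cdclk ? "ok" : "BAD");
        return 0;
    }
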
@@ -2674,37 +2660,18 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv) | |||
2674 | fraction = 200; | 2660 | fraction = 200; |
2675 | } | 2661 | } |
2676 | 2662 | ||
2677 | rawclk = CNP_RAWCLK_DIV((divider / 1000) - 1); | 2663 | rawclk = CNP_RAWCLK_DIV(divider / 1000); |
2678 | if (fraction) | 2664 | if (fraction) { |
2679 | rawclk |= CNP_RAWCLK_FRAC(DIV_ROUND_CLOSEST(1000, | 2665 | int numerator = 1; |
2680 | fraction) - 1); | ||
2681 | |||
2682 | I915_WRITE(PCH_RAWCLK_FREQ, rawclk); | ||
2683 | return divider + fraction; | ||
2684 | } | ||
2685 | 2666 | ||
2686 | static int icp_rawclk(struct drm_i915_private *dev_priv) | 2667 | rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000, |
2687 | { | 2668 | fraction) - 1); |
2688 | u32 rawclk; | 2669 | if (HAS_PCH_ICP(dev_priv)) |
2689 | int divider, numerator, denominator, frequency; | 2670 | rawclk |= ICP_RAWCLK_NUM(numerator); |
2690 | |||
2691 | if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) { | ||
2692 | frequency = 24000; | ||
2693 | divider = 23; | ||
2694 | numerator = 0; | ||
2695 | denominator = 0; | ||
2696 | } else { | ||
2697 | frequency = 19200; | ||
2698 | divider = 18; | ||
2699 | numerator = 1; | ||
2700 | denominator = 4; | ||
2701 | } | 2671 | } |
2702 | 2672 | ||
2703 | rawclk = CNP_RAWCLK_DIV(divider) | ICP_RAWCLK_NUM(numerator) | | ||
2704 | ICP_RAWCLK_DEN(denominator); | ||
2705 | |||
2706 | I915_WRITE(PCH_RAWCLK_FREQ, rawclk); | 2673 | I915_WRITE(PCH_RAWCLK_FREQ, rawclk); |
2707 | return frequency; | 2674 | return divider + fraction; |
2708 | } | 2675 | } |
2709 | 2676 | ||
2710 | static int pch_rawclk(struct drm_i915_private *dev_priv) | 2677 | static int pch_rawclk(struct drm_i915_private *dev_priv) |
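
Worked example of the merged CNP/ICP path for the 19.2 MHz case (divider = 19000, fraction = 200 from the branch above): DIV becomes 19, DEN becomes DIV_ROUND_CLOSEST(1000, 200) - 1 = 4, and ICP additionally programs NUM = 1, i.e. 19 + 1/5 MHz, while the function still returns divider + fraction = 19200 kHz. A runnable recomputation with the register packing reduced to plain fields:

    #include <stdio.h>

    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
        int divider = 19000, fraction = 200, numerator = 1;
        int div_field = divider / 1000;
        int den_field = DIV_ROUND_CLOSEST(numerator * 1000, fraction) - 1;

        /* 19 + 1/(4 + 1) MHz == 19.2 MHz, matching divider + fraction kHz */
        printf("DIV=%d NUM=%d DEN=%d -> %d kHz\n",
               div_field, numerator, den_field, divider + fraction);
        return 0;
    }
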
@@ -2754,9 +2721,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv) | |||
2754 | */ | 2721 | */ |
2755 | void intel_update_rawclk(struct drm_i915_private *dev_priv) | 2722 | void intel_update_rawclk(struct drm_i915_private *dev_priv) |
2756 | { | 2723 | { |
2757 | if (HAS_PCH_ICP(dev_priv)) | 2724 | if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv)) |
2758 | dev_priv->rawclk_freq = icp_rawclk(dev_priv); | ||
2759 | else if (HAS_PCH_CNP(dev_priv)) | ||
2760 | dev_priv->rawclk_freq = cnp_rawclk(dev_priv); | 2725 | dev_priv->rawclk_freq = cnp_rawclk(dev_priv); |
2761 | else if (HAS_PCH_SPLIT(dev_priv)) | 2726 | else if (HAS_PCH_SPLIT(dev_priv)) |
2762 | dev_priv->rawclk_freq = pch_rawclk(dev_priv); | 2727 | dev_priv->rawclk_freq = pch_rawclk(dev_priv); |
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index c6a7beabd58d..5127da286a2b 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c | |||
@@ -149,7 +149,8 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state) | |||
149 | if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) | 149 | if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) |
150 | limited_color_range = intel_crtc_state->limited_color_range; | 150 | limited_color_range = intel_crtc_state->limited_color_range; |
151 | 151 | ||
152 | if (intel_crtc_state->ycbcr420) { | 152 | if (intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || |
153 | intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) { | ||
153 | ilk_load_ycbcr_conversion_matrix(intel_crtc); | 154 | ilk_load_ycbcr_conversion_matrix(intel_crtc); |
154 | return; | 155 | return; |
155 | } else if (crtc_state->ctm) { | 156 | } else if (crtc_state->ctm) { |
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.c b/drivers/gpu/drm/i915/intel_combo_phy.c new file mode 100644 index 000000000000..3d0271cebf99 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_combo_phy.c | |||
@@ -0,0 +1,254 @@ | |||
1 | // SPDX-License-Identifier: MIT | ||
2 | /* | ||
3 | * Copyright © 2018 Intel Corporation | ||
4 | */ | ||
5 | |||
6 | #include "intel_drv.h" | ||
7 | |||
8 | #define for_each_combo_port(__dev_priv, __port) \ | ||
9 | for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \ | ||
10 | for_each_if(intel_port_is_combophy(__dev_priv, __port)) | ||
11 | |||
12 | #define for_each_combo_port_reverse(__dev_priv, __port) \ | ||
13 | for ((__port) = I915_MAX_PORTS; (__port)-- > PORT_A;) \ | ||
14 | for_each_if(intel_port_is_combophy(__dev_priv, __port)) | ||
15 | |||
16 | enum { | ||
17 | PROCMON_0_85V_DOT_0, | ||
18 | PROCMON_0_95V_DOT_0, | ||
19 | PROCMON_0_95V_DOT_1, | ||
20 | PROCMON_1_05V_DOT_0, | ||
21 | PROCMON_1_05V_DOT_1, | ||
22 | }; | ||
23 | |||
24 | static const struct cnl_procmon { | ||
25 | u32 dw1, dw9, dw10; | ||
26 | } cnl_procmon_values[] = { | ||
27 | [PROCMON_0_85V_DOT_0] = | ||
28 | { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, }, | ||
29 | [PROCMON_0_95V_DOT_0] = | ||
30 | { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, }, | ||
31 | [PROCMON_0_95V_DOT_1] = | ||
32 | { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, }, | ||
33 | [PROCMON_1_05V_DOT_0] = | ||
34 | { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, }, | ||
35 | [PROCMON_1_05V_DOT_1] = | ||
36 | { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, }, | ||
37 | }; | ||
38 | |||
39 | /* | ||
40 | * CNL has just one set of registers, while ICL has two sets: one for port A and | ||
41 | * the other for port B. The CNL registers are equivalent to the ICL port A | ||
42 | * registers, that's why we call the ICL macros even though the function has CNL | ||
43 | * on its name. | ||
44 | */ | ||
45 | static const struct cnl_procmon * | ||
46 | cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port) | ||
47 | { | ||
48 | const struct cnl_procmon *procmon; | ||
49 | u32 val; | ||
50 | |||
51 | val = I915_READ(ICL_PORT_COMP_DW3(port)); | ||
52 | switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) { | ||
53 | default: | ||
54 | MISSING_CASE(val); | ||
55 | /* fall through */ | ||
56 | case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0: | ||
57 | procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0]; | ||
58 | break; | ||
59 | case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0: | ||
60 | procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0]; | ||
61 | break; | ||
62 | case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1: | ||
63 | procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1]; | ||
64 | break; | ||
65 | case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0: | ||
66 | procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0]; | ||
67 | break; | ||
68 | case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1: | ||
69 | procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1]; | ||
70 | break; | ||
71 | } | ||
72 | |||
73 | return procmon; | ||
74 | } | ||
75 | |||
76 | static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv, | ||
77 | enum port port) | ||
78 | { | ||
79 | const struct cnl_procmon *procmon; | ||
80 | u32 val; | ||
81 | |||
82 | procmon = cnl_get_procmon_ref_values(dev_priv, port); | ||
83 | |||
84 | val = I915_READ(ICL_PORT_COMP_DW1(port)); | ||
85 | val &= ~((0xff << 16) | 0xff); | ||
86 | val |= procmon->dw1; | ||
87 | I915_WRITE(ICL_PORT_COMP_DW1(port), val); | ||
88 | |||
89 | I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9); | ||
90 | I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10); | ||
91 | } | ||
92 | |||
93 | static bool check_phy_reg(struct drm_i915_private *dev_priv, | ||
94 | enum port port, i915_reg_t reg, u32 mask, | ||
95 | u32 expected_val) | ||
96 | { | ||
97 | u32 val = I915_READ(reg); | ||
98 | |||
99 | if ((val & mask) != expected_val) { | ||
100 | DRM_DEBUG_DRIVER("Port %c combo PHY reg %08x state mismatch: " | ||
101 | "current %08x mask %08x expected %08x\n", | ||
102 | port_name(port), | ||
103 | reg.reg, val, mask, expected_val); | ||
104 | return false; | ||
105 | } | ||
106 | |||
107 | return true; | ||
108 | } | ||
109 | |||
110 | static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv, | ||
111 | enum port port) | ||
112 | { | ||
113 | const struct cnl_procmon *procmon; | ||
114 | bool ret; | ||
115 | |||
116 | procmon = cnl_get_procmon_ref_values(dev_priv, port); | ||
117 | |||
118 | ret = check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW1(port), | ||
119 | (0xff << 16) | 0xff, procmon->dw1); | ||
120 | ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW9(port), | ||
121 | -1U, procmon->dw9); | ||
122 | ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW10(port), | ||
123 | -1U, procmon->dw10); | ||
124 | |||
125 | return ret; | ||
126 | } | ||
127 | |||
128 | static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv) | ||
129 | { | ||
130 | return !(I915_READ(CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) && | ||
131 | (I915_READ(CNL_PORT_COMP_DW0) & COMP_INIT); | ||
132 | } | ||
133 | |||
134 | static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv) | ||
135 | { | ||
136 | enum port port = PORT_A; | ||
137 | bool ret; | ||
138 | |||
139 | if (!cnl_combo_phy_enabled(dev_priv)) | ||
140 | return false; | ||
141 | |||
142 | ret = cnl_verify_procmon_ref_values(dev_priv, port); | ||
143 | |||
144 | ret &= check_phy_reg(dev_priv, port, CNL_PORT_CL1CM_DW5, | ||
145 | CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE); | ||
146 | |||
147 | return ret; | ||
148 | } | ||
149 | |||
150 | void cnl_combo_phys_init(struct drm_i915_private *dev_priv) | ||
151 | { | ||
152 | u32 val; | ||
153 | |||
154 | val = I915_READ(CHICKEN_MISC_2); | ||
155 | val &= ~CNL_COMP_PWR_DOWN; | ||
156 | I915_WRITE(CHICKEN_MISC_2, val); | ||
157 | |||
158 | /* Dummy PORT_A to get the correct CNL register from the ICL macro */ | ||
159 | cnl_set_procmon_ref_values(dev_priv, PORT_A); | ||
160 | |||
161 | val = I915_READ(CNL_PORT_COMP_DW0); | ||
162 | val |= COMP_INIT; | ||
163 | I915_WRITE(CNL_PORT_COMP_DW0, val); | ||
164 | |||
165 | val = I915_READ(CNL_PORT_CL1CM_DW5); | ||
166 | val |= CL_POWER_DOWN_ENABLE; | ||
167 | I915_WRITE(CNL_PORT_CL1CM_DW5, val); | ||
168 | } | ||
169 | |||
170 | void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv) | ||
171 | { | ||
172 | u32 val; | ||
173 | |||
174 | if (!cnl_combo_phy_verify_state(dev_priv)) | ||
175 | DRM_WARN("Combo PHY HW state changed unexpectedly.\n"); | ||
176 | |||
177 | val = I915_READ(CHICKEN_MISC_2); | ||
178 | val |= CNL_COMP_PWR_DOWN; | ||
179 | I915_WRITE(CHICKEN_MISC_2, val); | ||
180 | } | ||
181 | |||
182 | static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv, | ||
183 | enum port port) | ||
184 | { | ||
185 | return !(I915_READ(ICL_PHY_MISC(port)) & | ||
186 | ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) && | ||
187 | (I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT); | ||
188 | } | ||
189 | |||
190 | static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv, | ||
191 | enum port port) | ||
192 | { | ||
193 | bool ret; | ||
194 | |||
195 | if (!icl_combo_phy_enabled(dev_priv, port)) | ||
196 | return false; | ||
197 | |||
198 | ret = cnl_verify_procmon_ref_values(dev_priv, port); | ||
199 | |||
200 | ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port), | ||
201 | CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE); | ||
202 | |||
203 | return ret; | ||
204 | } | ||
205 | |||
206 | void icl_combo_phys_init(struct drm_i915_private *dev_priv) | ||
207 | { | ||
208 | enum port port; | ||
209 | |||
210 | for_each_combo_port(dev_priv, port) { | ||
211 | u32 val; | ||
212 | |||
213 | if (icl_combo_phy_verify_state(dev_priv, port)) { | ||
214 | DRM_DEBUG_DRIVER("Port %c combo PHY already enabled, won't reprogram it.\n", | ||
215 | port_name(port)); | ||
216 | continue; | ||
217 | } | ||
218 | |||
219 | val = I915_READ(ICL_PHY_MISC(port)); | ||
220 | val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; | ||
221 | I915_WRITE(ICL_PHY_MISC(port), val); | ||
222 | |||
223 | cnl_set_procmon_ref_values(dev_priv, port); | ||
224 | |||
225 | val = I915_READ(ICL_PORT_COMP_DW0(port)); | ||
226 | val |= COMP_INIT; | ||
227 | I915_WRITE(ICL_PORT_COMP_DW0(port), val); | ||
228 | |||
229 | val = I915_READ(ICL_PORT_CL_DW5(port)); | ||
230 | val |= CL_POWER_DOWN_ENABLE; | ||
231 | I915_WRITE(ICL_PORT_CL_DW5(port), val); | ||
232 | } | ||
233 | } | ||
234 | |||
235 | void icl_combo_phys_uninit(struct drm_i915_private *dev_priv) | ||
236 | { | ||
237 | enum port port; | ||
238 | |||
239 | for_each_combo_port_reverse(dev_priv, port) { | ||
240 | u32 val; | ||
241 | |||
242 | if (!icl_combo_phy_verify_state(dev_priv, port)) | ||
243 | DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n", | ||
244 | port_name(port)); | ||
245 | |||
246 | val = I915_READ(ICL_PHY_MISC(port)); | ||
247 | val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; | ||
248 | I915_WRITE(ICL_PHY_MISC(port), val); | ||
249 | |||
250 | val = I915_READ(ICL_PORT_COMP_DW0(port)); | ||
251 | val &= ~COMP_INIT; | ||
252 | I915_WRITE(ICL_PORT_COMP_DW0(port), val); | ||
253 | } | ||
254 | } | ||
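
The for_each_combo_port iterators at the top of the new file rely on drm's for_each_if filtering idiom, which turns the trailing statement into the else branch of a guard so the macro still binds exactly one statement. A runnable toy reduction (the even-number iterator is illustrative only; for_each_if is copied from drm):

    #include <stdio.h>

    #define for_each_if(condition) if (!(condition)) {} else

    #define for_each_even(__i, __n) \
        for ((__i) = 0; (__i) < (__n); (__i)++) \
            for_each_if(((__i) % 2) == 0)

    int main(void)
    {
        int i;

        for_each_even(i, 8)
            printf("%d ", i);   /* prints: 0 2 4 6 */
        printf("\n");
        return 0;
    }
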
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_connector.c index ca44bf368e24..18e370f607bc 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_connector.c | |||
@@ -25,11 +25,140 @@ | |||
25 | 25 | ||
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/i2c.h> | 27 | #include <linux/i2c.h> |
28 | #include <drm/drm_atomic_helper.h> | ||
28 | #include <drm/drm_edid.h> | 29 | #include <drm/drm_edid.h> |
29 | #include <drm/drmP.h> | 30 | #include <drm/drmP.h> |
30 | #include "intel_drv.h" | 31 | #include "intel_drv.h" |
31 | #include "i915_drv.h" | 32 | #include "i915_drv.h" |
32 | 33 | ||
34 | int intel_connector_init(struct intel_connector *connector) | ||
35 | { | ||
36 | struct intel_digital_connector_state *conn_state; | ||
37 | |||
38 | /* | ||
39 | * Allocate enough memory to hold intel_digital_connector_state. | ||
40 | * This might be a few bytes too many, but for connectors that don't | ||
41 | * need it we'll free the state and allocate a smaller one on the first | ||
42 | * successful commit anyway. | ||
43 | */ | ||
44 | conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL); | ||
45 | if (!conn_state) | ||
46 | return -ENOMEM; | ||
47 | |||
48 | __drm_atomic_helper_connector_reset(&connector->base, | ||
49 | &conn_state->base); | ||
50 | |||
51 | return 0; | ||
52 | } | ||
53 | |||
54 | struct intel_connector *intel_connector_alloc(void) | ||
55 | { | ||
56 | struct intel_connector *connector; | ||
57 | |||
58 | connector = kzalloc(sizeof(*connector), GFP_KERNEL); | ||
59 | if (!connector) | ||
60 | return NULL; | ||
61 | |||
62 | if (intel_connector_init(connector) < 0) { | ||
63 | kfree(connector); | ||
64 | return NULL; | ||
65 | } | ||
66 | |||
67 | return connector; | ||
68 | } | ||
69 | |||
70 | /* | ||
71 | * Free the bits allocated by intel_connector_alloc. | ||
72 | * This should only be used after intel_connector_alloc has returned | ||
73 | * successfully, and before drm_connector_init returns successfully. | ||
74 | * Otherwise the destroy callbacks for the connector and the state should | ||
75 | * take care of proper cleanup/free (see intel_connector_destroy). | ||
76 | */ | ||
77 | void intel_connector_free(struct intel_connector *connector) | ||
78 | { | ||
79 | kfree(to_intel_digital_connector_state(connector->base.state)); | ||
80 | kfree(connector); | ||
81 | } | ||
82 | |||
83 | /* | ||
84 | * Connector type independent destroy hook for drm_connector_funcs. | ||
85 | */ | ||
86 | void intel_connector_destroy(struct drm_connector *connector) | ||
87 | { | ||
88 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
89 | |||
90 | kfree(intel_connector->detect_edid); | ||
91 | |||
92 | if (!IS_ERR_OR_NULL(intel_connector->edid)) | ||
93 | kfree(intel_connector->edid); | ||
94 | |||
95 | intel_panel_fini(&intel_connector->panel); | ||
96 | |||
97 | drm_connector_cleanup(connector); | ||
98 | kfree(connector); | ||
99 | } | ||
100 | |||
101 | int intel_connector_register(struct drm_connector *connector) | ||
102 | { | ||
103 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
104 | int ret; | ||
105 | |||
106 | ret = intel_backlight_device_register(intel_connector); | ||
107 | if (ret) | ||
108 | goto err; | ||
109 | |||
110 | if (i915_inject_load_failure()) { | ||
111 | ret = -EFAULT; | ||
112 | goto err_backlight; | ||
113 | } | ||
114 | |||
115 | return 0; | ||
116 | |||
117 | err_backlight: | ||
118 | intel_backlight_device_unregister(intel_connector); | ||
119 | err: | ||
120 | return ret; | ||
121 | } | ||
122 | |||
123 | void intel_connector_unregister(struct drm_connector *connector) | ||
124 | { | ||
125 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
126 | |||
127 | intel_backlight_device_unregister(intel_connector); | ||
128 | } | ||
129 | |||
130 | void intel_connector_attach_encoder(struct intel_connector *connector, | ||
131 | struct intel_encoder *encoder) | ||
132 | { | ||
133 | connector->encoder = encoder; | ||
134 | drm_connector_attach_encoder(&connector->base, &encoder->base); | ||
135 | } | ||
136 | |||
137 | /* | ||
138 | * Simple connector->get_hw_state implementation for encoders that support only | ||
139 | * one connector and no cloning and hence the encoder state determines the state | ||
140 | * of the connector. | ||
141 | */ | ||
142 | bool intel_connector_get_hw_state(struct intel_connector *connector) | ||
143 | { | ||
144 | enum pipe pipe = 0; | ||
145 | struct intel_encoder *encoder = connector->encoder; | ||
146 | |||
147 | return encoder->get_hw_state(encoder, &pipe); | ||
148 | } | ||
149 | |||
150 | enum pipe intel_connector_get_pipe(struct intel_connector *connector) | ||
151 | { | ||
152 | struct drm_device *dev = connector->base.dev; | ||
153 | |||
154 | WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); | ||
155 | |||
156 | if (!connector->base.state->crtc) | ||
157 | return INVALID_PIPE; | ||
158 | |||
159 | return to_intel_crtc(connector->base.state->crtc)->pipe; | ||
160 | } | ||
161 | |||
33 | /** | 162 | /** |
34 | * intel_connector_update_modes - update connector from edid | 163 | * intel_connector_update_modes - update connector from edid |
35 | * @connector: DRM connector device to use | 164 | * @connector: DRM connector device to use |
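
The comment above intel_connector_free() pins down its narrow window: after a successful intel_connector_alloc() but before drm_connector_init() succeeds and the destroy callback takes over cleanup. A userspace mock of that bracketing, all names stand-ins:

    #include <stdlib.h>

    struct mock_connector { void *state; };

    static struct mock_connector *mock_connector_alloc(void)
    {
        struct mock_connector *c = calloc(1, sizeof(*c));

        if (c && !(c->state = calloc(1, 64))) { /* stand-in for the state */
            free(c);
            return NULL;
        }
        return c;
    }

    /* Only valid before the (mock) init step succeeds and takes ownership. */
    static void mock_connector_free(struct mock_connector *c)
    {
        free(c->state);
        free(c);
    }

    int main(void)
    {
        struct mock_connector *c = mock_connector_alloc();

        if (!c)
            return 1;
        /* ...the init step fails here, so undo the allocation by hand... */
        mock_connector_free(c);
        return 0;
    }
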
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 0c6bf82bb059..68f2fb89ece3 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -354,6 +354,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder, | |||
354 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) | 354 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) |
355 | return false; | 355 | return false; |
356 | 356 | ||
357 | pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; | ||
357 | return true; | 358 | return true; |
358 | } | 359 | } |
359 | 360 | ||
@@ -368,6 +369,7 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder, | |||
368 | return false; | 369 | return false; |
369 | 370 | ||
370 | pipe_config->has_pch_encoder = true; | 371 | pipe_config->has_pch_encoder = true; |
372 | pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; | ||
371 | 373 | ||
372 | return true; | 374 | return true; |
373 | } | 375 | } |
@@ -389,6 +391,7 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder, | |||
389 | return false; | 391 | return false; |
390 | 392 | ||
391 | pipe_config->has_pch_encoder = true; | 393 | pipe_config->has_pch_encoder = true; |
394 | pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; | ||
392 | 395 | ||
393 | /* LPT FDI RX only supports 8bpc. */ | 396 | /* LPT FDI RX only supports 8bpc. */ |
394 | if (HAS_PCH_LPT(dev_priv)) { | 397 | if (HAS_PCH_LPT(dev_priv)) { |
@@ -849,12 +852,6 @@ out: | |||
849 | return status; | 852 | return status; |
850 | } | 853 | } |
851 | 854 | ||
852 | static void intel_crt_destroy(struct drm_connector *connector) | ||
853 | { | ||
854 | drm_connector_cleanup(connector); | ||
855 | kfree(connector); | ||
856 | } | ||
857 | |||
858 | static int intel_crt_get_modes(struct drm_connector *connector) | 855 | static int intel_crt_get_modes(struct drm_connector *connector) |
859 | { | 856 | { |
860 | struct drm_device *dev = connector->dev; | 857 | struct drm_device *dev = connector->dev; |
@@ -909,7 +906,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = { | |||
909 | .fill_modes = drm_helper_probe_single_connector_modes, | 906 | .fill_modes = drm_helper_probe_single_connector_modes, |
910 | .late_register = intel_connector_register, | 907 | .late_register = intel_connector_register, |
911 | .early_unregister = intel_connector_unregister, | 908 | .early_unregister = intel_connector_unregister, |
912 | .destroy = intel_crt_destroy, | 909 | .destroy = intel_connector_destroy, |
913 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | 910 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, |
914 | .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, | 911 | .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, |
915 | }; | 912 | }; |
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index d48186e9ddad..a516697bf57d 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c | |||
@@ -34,34 +34,38 @@ | |||
34 | * low-power state and comes back to normal. | 34 | * low-power state and comes back to normal. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | #define I915_CSR_ICL "i915/icl_dmc_ver1_07.bin" | 37 | #define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE |
38 | MODULE_FIRMWARE(I915_CSR_ICL); | ||
39 | #define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) | ||
40 | 38 | ||
41 | #define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin" | 39 | #define ICL_CSR_PATH "i915/icl_dmc_ver1_07.bin" |
42 | MODULE_FIRMWARE(I915_CSR_GLK); | 40 | #define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) |
43 | #define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4) | 41 | #define ICL_CSR_MAX_FW_SIZE 0x6000 |
42 | MODULE_FIRMWARE(ICL_CSR_PATH); | ||
44 | 43 | ||
45 | #define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin" | 44 | #define CNL_CSR_PATH "i915/cnl_dmc_ver1_07.bin" |
46 | MODULE_FIRMWARE(I915_CSR_CNL); | ||
47 | #define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) | 45 | #define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) |
46 | #define CNL_CSR_MAX_FW_SIZE GLK_CSR_MAX_FW_SIZE | ||
47 | MODULE_FIRMWARE(CNL_CSR_PATH); | ||
48 | |||
49 | #define GLK_CSR_PATH "i915/glk_dmc_ver1_04.bin" | ||
50 | #define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4) | ||
51 | #define GLK_CSR_MAX_FW_SIZE 0x4000 | ||
52 | MODULE_FIRMWARE(GLK_CSR_PATH); | ||
48 | 53 | ||
49 | #define I915_CSR_KBL "i915/kbl_dmc_ver1_04.bin" | 54 | #define KBL_CSR_PATH "i915/kbl_dmc_ver1_04.bin" |
50 | MODULE_FIRMWARE(I915_CSR_KBL); | ||
51 | #define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4) | 55 | #define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4) |
56 | #define KBL_CSR_MAX_FW_SIZE BXT_CSR_MAX_FW_SIZE | ||
57 | MODULE_FIRMWARE(KBL_CSR_PATH); | ||
52 | 58 | ||
53 | #define I915_CSR_SKL "i915/skl_dmc_ver1_27.bin" | 59 | #define SKL_CSR_PATH "i915/skl_dmc_ver1_27.bin" |
54 | MODULE_FIRMWARE(I915_CSR_SKL); | ||
55 | #define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 27) | 60 | #define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 27) |
61 | #define SKL_CSR_MAX_FW_SIZE BXT_CSR_MAX_FW_SIZE | ||
62 | MODULE_FIRMWARE(SKL_CSR_PATH); | ||
56 | 63 | ||
57 | #define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin" | 64 | #define BXT_CSR_PATH "i915/bxt_dmc_ver1_07.bin" |
58 | MODULE_FIRMWARE(I915_CSR_BXT); | ||
59 | #define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) | 65 | #define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) |
60 | |||
61 | |||
62 | #define BXT_CSR_MAX_FW_SIZE 0x3000 | 66 | #define BXT_CSR_MAX_FW_SIZE 0x3000 |
63 | #define GLK_CSR_MAX_FW_SIZE 0x4000 | 67 | MODULE_FIRMWARE(BXT_CSR_PATH); |
64 | #define ICL_CSR_MAX_FW_SIZE 0x6000 | 68 | |
65 | #define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF | 69 | #define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF |
66 | 70 | ||
67 | struct intel_css_header { | 71 | struct intel_css_header { |
@@ -190,6 +194,12 @@ static const struct stepping_info bxt_stepping_info[] = { | |||
190 | {'B', '0'}, {'B', '1'}, {'B', '2'} | 194 | {'B', '0'}, {'B', '1'}, {'B', '2'} |
191 | }; | 195 | }; |
192 | 196 | ||
197 | static const struct stepping_info icl_stepping_info[] = { | ||
198 | {'A', '0'}, {'A', '1'}, {'A', '2'}, | ||
199 | {'B', '0'}, {'B', '2'}, | ||
200 | {'C', '0'} | ||
201 | }; | ||
202 | |||
193 | static const struct stepping_info no_stepping_info = { '*', '*' }; | 203 | static const struct stepping_info no_stepping_info = { '*', '*' }; |
194 | 204 | ||
195 | static const struct stepping_info * | 205 | static const struct stepping_info * |
@@ -198,7 +208,10 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv) | |||
198 | const struct stepping_info *si; | 208 | const struct stepping_info *si; |
199 | unsigned int size; | 209 | unsigned int size; |
200 | 210 | ||
201 | if (IS_SKYLAKE(dev_priv)) { | 211 | if (IS_ICELAKE(dev_priv)) { |
212 | size = ARRAY_SIZE(icl_stepping_info); | ||
213 | si = icl_stepping_info; | ||
214 | } else if (IS_SKYLAKE(dev_priv)) { | ||
202 | size = ARRAY_SIZE(skl_stepping_info); | 215 | size = ARRAY_SIZE(skl_stepping_info); |
203 | si = skl_stepping_info; | 216 | si = skl_stepping_info; |
204 | } else if (IS_BROXTON(dev_priv)) { | 217 | } else if (IS_BROXTON(dev_priv)) { |
@@ -285,10 +298,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, | |||
285 | struct intel_csr *csr = &dev_priv->csr; | 298 | struct intel_csr *csr = &dev_priv->csr; |
286 | const struct stepping_info *si = intel_get_stepping_info(dev_priv); | 299 | const struct stepping_info *si = intel_get_stepping_info(dev_priv); |
287 | uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; | 300 | uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; |
288 | uint32_t max_fw_size = 0; | ||
289 | uint32_t i; | 301 | uint32_t i; |
290 | uint32_t *dmc_payload; | 302 | uint32_t *dmc_payload; |
291 | uint32_t required_version; | ||
292 | 303 | ||
293 | if (!fw) | 304 | if (!fw) |
294 | return NULL; | 305 | return NULL; |
@@ -303,38 +314,19 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, | |||
303 | return NULL; | 314 | return NULL; |
304 | } | 315 | } |
305 | 316 | ||
306 | csr->version = css_header->version; | 317 | if (csr->required_version && |
307 | 318 | css_header->version != csr->required_version) { | |
308 | if (csr->fw_path == i915_modparams.dmc_firmware_path) { | ||
309 | /* Bypass version check for firmware override. */ | ||
310 | required_version = csr->version; | ||
311 | } else if (IS_ICELAKE(dev_priv)) { | ||
312 | required_version = ICL_CSR_VERSION_REQUIRED; | ||
313 | } else if (IS_CANNONLAKE(dev_priv)) { | ||
314 | required_version = CNL_CSR_VERSION_REQUIRED; | ||
315 | } else if (IS_GEMINILAKE(dev_priv)) { | ||
316 | required_version = GLK_CSR_VERSION_REQUIRED; | ||
317 | } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) { | ||
318 | required_version = KBL_CSR_VERSION_REQUIRED; | ||
319 | } else if (IS_SKYLAKE(dev_priv)) { | ||
320 | required_version = SKL_CSR_VERSION_REQUIRED; | ||
321 | } else if (IS_BROXTON(dev_priv)) { | ||
322 | required_version = BXT_CSR_VERSION_REQUIRED; | ||
323 | } else { | ||
324 | MISSING_CASE(INTEL_REVID(dev_priv)); | ||
325 | required_version = 0; | ||
326 | } | ||
327 | |||
328 | if (csr->version != required_version) { | ||
329 | DRM_INFO("Refusing to load DMC firmware v%u.%u," | 319 | DRM_INFO("Refusing to load DMC firmware v%u.%u," |
330 | " please use v%u.%u\n", | 320 | " please use v%u.%u\n", |
331 | CSR_VERSION_MAJOR(csr->version), | 321 | CSR_VERSION_MAJOR(css_header->version), |
332 | CSR_VERSION_MINOR(csr->version), | 322 | CSR_VERSION_MINOR(css_header->version), |
333 | CSR_VERSION_MAJOR(required_version), | 323 | CSR_VERSION_MAJOR(csr->required_version), |
334 | CSR_VERSION_MINOR(required_version)); | 324 | CSR_VERSION_MINOR(csr->required_version)); |
335 | return NULL; | 325 | return NULL; |
336 | } | 326 | } |
337 | 327 | ||
328 | csr->version = css_header->version; | ||
329 | |||
338 | readcount += sizeof(struct intel_css_header); | 330 | readcount += sizeof(struct intel_css_header); |
339 | 331 | ||
340 | /* Extract Package Header information*/ | 332 | /* Extract Package Header information*/ |
@@ -402,15 +394,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, | |||
402 | 394 | ||
403 | /* fw_size is in dwords, so multiplied by 4 to convert into bytes. */ | 395 | /* fw_size is in dwords, so multiplied by 4 to convert into bytes. */ |
404 | nbytes = dmc_header->fw_size * 4; | 396 | nbytes = dmc_header->fw_size * 4; |
405 | if (INTEL_GEN(dev_priv) >= 11) | 397 | if (nbytes > csr->max_fw_size) { |
406 | max_fw_size = ICL_CSR_MAX_FW_SIZE; | ||
407 | else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) | ||
408 | max_fw_size = GLK_CSR_MAX_FW_SIZE; | ||
409 | else if (IS_GEN9(dev_priv)) | ||
410 | max_fw_size = BXT_CSR_MAX_FW_SIZE; | ||
411 | else | ||
412 | MISSING_CASE(INTEL_REVID(dev_priv)); | ||
413 | if (nbytes > max_fw_size) { | ||
414 | DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes); | 398 | DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes); |
415 | return NULL; | 399 | return NULL; |
416 | } | 400 | } |
@@ -475,27 +459,57 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) | |||
475 | if (!HAS_CSR(dev_priv)) | 459 | if (!HAS_CSR(dev_priv)) |
476 | return; | 460 | return; |
477 | 461 | ||
478 | if (i915_modparams.dmc_firmware_path) | ||
479 | csr->fw_path = i915_modparams.dmc_firmware_path; | ||
480 | else if (IS_ICELAKE(dev_priv)) | ||
481 | csr->fw_path = I915_CSR_ICL; | ||
482 | else if (IS_CANNONLAKE(dev_priv)) | ||
483 | csr->fw_path = I915_CSR_CNL; | ||
484 | else if (IS_GEMINILAKE(dev_priv)) | ||
485 | csr->fw_path = I915_CSR_GLK; | ||
486 | else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) | ||
487 | csr->fw_path = I915_CSR_KBL; | ||
488 | else if (IS_SKYLAKE(dev_priv)) | ||
489 | csr->fw_path = I915_CSR_SKL; | ||
490 | else if (IS_BROXTON(dev_priv)) | ||
491 | csr->fw_path = I915_CSR_BXT; | ||
492 | |||
493 | /* | 462 | /* |
494 | * Obtain a runtime pm reference, until CSR is loaded, | 463 | * Obtain a runtime pm reference, until CSR is loaded, to avoid entering |
495 | * to avoid entering runtime-suspend. | 464 | * runtime-suspend. |
465 | * | ||
466 | * On error, we return with the rpm wakeref held to prevent runtime | ||
467 | * suspend as runtime suspend *requires* a working CSR for whatever | ||
468 | * reason. | ||
496 | */ | 469 | */ |
497 | intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); | 470 | intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); |
498 | 471 | ||
472 | if (INTEL_GEN(dev_priv) >= 12) { | ||
473 | /* Allow to load fw via parameter using the last known size */ | ||
474 | csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE; | ||
475 | } else if (IS_ICELAKE(dev_priv)) { | ||
476 | csr->fw_path = ICL_CSR_PATH; | ||
477 | csr->required_version = ICL_CSR_VERSION_REQUIRED; | ||
478 | csr->max_fw_size = ICL_CSR_MAX_FW_SIZE; | ||
479 | } else if (IS_CANNONLAKE(dev_priv)) { | ||
480 | csr->fw_path = CNL_CSR_PATH; | ||
481 | csr->required_version = CNL_CSR_VERSION_REQUIRED; | ||
482 | csr->max_fw_size = CNL_CSR_MAX_FW_SIZE; | ||
483 | } else if (IS_GEMINILAKE(dev_priv)) { | ||
484 | csr->fw_path = GLK_CSR_PATH; | ||
485 | csr->required_version = GLK_CSR_VERSION_REQUIRED; | ||
486 | csr->max_fw_size = GLK_CSR_MAX_FW_SIZE; | ||
487 | } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) { | ||
488 | csr->fw_path = KBL_CSR_PATH; | ||
489 | csr->required_version = KBL_CSR_VERSION_REQUIRED; | ||
490 | csr->max_fw_size = KBL_CSR_MAX_FW_SIZE; | ||
491 | } else if (IS_SKYLAKE(dev_priv)) { | ||
492 | csr->fw_path = SKL_CSR_PATH; | ||
493 | csr->required_version = SKL_CSR_VERSION_REQUIRED; | ||
494 | csr->max_fw_size = SKL_CSR_MAX_FW_SIZE; | ||
495 | } else if (IS_BROXTON(dev_priv)) { | ||
496 | csr->fw_path = BXT_CSR_PATH; | ||
497 | csr->required_version = BXT_CSR_VERSION_REQUIRED; | ||
498 | csr->max_fw_size = BXT_CSR_MAX_FW_SIZE; | ||
499 | } | ||
500 | |||
501 | if (i915_modparams.dmc_firmware_path) { | ||
502 | if (strlen(i915_modparams.dmc_firmware_path) == 0) { | ||
503 | csr->fw_path = NULL; | ||
504 | DRM_INFO("Disabling CSR firmware and runtime PM\n"); | ||
505 | return; | ||
506 | } | ||
507 | |||
508 | csr->fw_path = i915_modparams.dmc_firmware_path; | ||
509 | /* Bypass version check for firmware override. */ | ||
510 | csr->required_version = 0; | ||
511 | } | ||
512 | |||
499 | if (csr->fw_path == NULL) { | 513 | if (csr->fw_path == NULL) { |
500 | DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n"); | 514 | DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n"); |
501 | WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv))); | 515 | WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv))); |
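
The required_version comparison above works on packed version words; a sketch of the packing, assuming the 16.16 major/minor split implied by the CSR_VERSION_MAJOR/MINOR usage in parse_csr_fw():

    #include <stdio.h>

    #define CSR_VERSION(major, minor)  ((major) << 16 | (minor))
    #define CSR_VERSION_MAJOR(version) ((version) >> 16)
    #define CSR_VERSION_MINOR(version) ((version) & 0xffff)

    int main(void)
    {
        unsigned int v = CSR_VERSION(1, 27); /* SKL's required version */

        printf("v%u.%u\n", CSR_VERSION_MAJOR(v), CSR_VERSION_MINOR(v));
        return 0;
    }
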
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 5186cd7075f9..ad11540ac436 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -642,7 +642,7 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) | |||
642 | static const struct ddi_buf_trans * | 642 | static const struct ddi_buf_trans * |
643 | kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) | 643 | kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) |
644 | { | 644 | { |
645 | if (IS_KBL_ULX(dev_priv)) { | 645 | if (IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) { |
646 | *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp); | 646 | *n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp); |
647 | return kbl_y_ddi_translations_dp; | 647 | return kbl_y_ddi_translations_dp; |
648 | } else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) { | 648 | } else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) { |
@@ -658,7 +658,7 @@ static const struct ddi_buf_trans * | |||
658 | skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) | 658 | skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) |
659 | { | 659 | { |
660 | if (dev_priv->vbt.edp.low_vswing) { | 660 | if (dev_priv->vbt.edp.low_vswing) { |
661 | if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) { | 661 | if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) { |
662 | *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp); | 662 | *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp); |
663 | return skl_y_ddi_translations_edp; | 663 | return skl_y_ddi_translations_edp; |
664 | } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) || | 664 | } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) || |
@@ -680,7 +680,7 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) | |||
680 | static const struct ddi_buf_trans * | 680 | static const struct ddi_buf_trans * |
681 | skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) | 681 | skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) |
682 | { | 682 | { |
683 | if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) { | 683 | if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) { |
684 | *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi); | 684 | *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi); |
685 | return skl_y_ddi_translations_hdmi; | 685 | return skl_y_ddi_translations_hdmi; |
686 | } else { | 686 | } else { |
@@ -1060,10 +1060,10 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll) | |||
1060 | } | 1060 | } |
1061 | 1061 | ||
1062 | static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder, | 1062 | static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder, |
1063 | const struct intel_shared_dpll *pll) | 1063 | const struct intel_crtc_state *crtc_state) |
1064 | { | 1064 | { |
1065 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); | 1065 | const struct intel_shared_dpll *pll = crtc_state->shared_dpll; |
1066 | int clock = crtc->config->port_clock; | 1066 | int clock = crtc_state->port_clock; |
1067 | const enum intel_dpll_id id = pll->info->id; | 1067 | const enum intel_dpll_id id = pll->info->id; |
1068 | 1068 | ||
1069 | switch (id) { | 1069 | switch (id) { |
@@ -1517,7 +1517,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) | |||
1517 | else | 1517 | else |
1518 | dotclock = pipe_config->port_clock; | 1518 | dotclock = pipe_config->port_clock; |
1519 | 1519 | ||
1520 | if (pipe_config->ycbcr420) | 1520 | if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) |
1521 | dotclock *= 2; | 1521 | dotclock *= 2; |
1522 | 1522 | ||
1523 | if (pipe_config->pixel_multiplier) | 1523 | if (pipe_config->pixel_multiplier) |
@@ -1737,16 +1737,16 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder, | |||
1737 | { | 1737 | { |
1738 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 1738 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
1739 | 1739 | ||
1740 | if (INTEL_GEN(dev_priv) <= 8) | 1740 | if (IS_ICELAKE(dev_priv)) |
1741 | hsw_ddi_clock_get(encoder, pipe_config); | 1741 | icl_ddi_clock_get(encoder, pipe_config); |
1742 | else if (IS_GEN9_BC(dev_priv)) | ||
1743 | skl_ddi_clock_get(encoder, pipe_config); | ||
1744 | else if (IS_GEN9_LP(dev_priv)) | ||
1745 | bxt_ddi_clock_get(encoder, pipe_config); | ||
1746 | else if (IS_CANNONLAKE(dev_priv)) | 1742 | else if (IS_CANNONLAKE(dev_priv)) |
1747 | cnl_ddi_clock_get(encoder, pipe_config); | 1743 | cnl_ddi_clock_get(encoder, pipe_config); |
1748 | else if (IS_ICELAKE(dev_priv)) | 1744 | else if (IS_GEN9_LP(dev_priv)) |
1749 | icl_ddi_clock_get(encoder, pipe_config); | 1745 | bxt_ddi_clock_get(encoder, pipe_config); |
1746 | else if (IS_GEN9_BC(dev_priv)) | ||
1747 | skl_ddi_clock_get(encoder, pipe_config); | ||
1748 | else if (INTEL_GEN(dev_priv) <= 8) | ||
1749 | hsw_ddi_clock_get(encoder, pipe_config); | ||
1750 | } | 1750 | } |
1751 | 1751 | ||
1752 | void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) | 1752 | void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) |
@@ -1784,6 +1784,13 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) | |||
1784 | break; | 1784 | break; |
1785 | } | 1785 | } |
1786 | 1786 | ||
1787 | /* | ||
1788 | * As per DP 1.2 spec section 2.3.4.3, while sending | ||
1789 | * YCBCR 444 signals we should program MSA MISC1/0 fields with | ||
1790 | * colorspace information. The output colorspace encoding is BT601. | ||
1791 | */ | ||
1792 | if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) | ||
1793 | temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR; | ||
1787 | I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp); | 1794 | I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp); |
1788 | } | 1795 | } |
1789 | 1796 | ||
@@ -1998,24 +2005,24 @@ out: | |||
1998 | return ret; | 2005 | return ret; |
1999 | } | 2006 | } |
2000 | 2007 | ||
2001 | bool intel_ddi_get_hw_state(struct intel_encoder *encoder, | 2008 | static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, |
2002 | enum pipe *pipe) | 2009 | u8 *pipe_mask, bool *is_dp_mst) |
2003 | { | 2010 | { |
2004 | struct drm_device *dev = encoder->base.dev; | 2011 | struct drm_device *dev = encoder->base.dev; |
2005 | struct drm_i915_private *dev_priv = to_i915(dev); | 2012 | struct drm_i915_private *dev_priv = to_i915(dev); |
2006 | enum port port = encoder->port; | 2013 | enum port port = encoder->port; |
2007 | enum pipe p; | 2014 | enum pipe p; |
2008 | u32 tmp; | 2015 | u32 tmp; |
2009 | bool ret; | 2016 | u8 mst_pipe_mask; |
2017 | |||
2018 | *pipe_mask = 0; | ||
2019 | *is_dp_mst = false; | ||
2010 | 2020 | ||
2011 | if (!intel_display_power_get_if_enabled(dev_priv, | 2021 | if (!intel_display_power_get_if_enabled(dev_priv, |
2012 | encoder->power_domain)) | 2022 | encoder->power_domain)) |
2013 | return false; | 2023 | return; |
2014 | |||
2015 | ret = false; | ||
2016 | 2024 | ||
2017 | tmp = I915_READ(DDI_BUF_CTL(port)); | 2025 | tmp = I915_READ(DDI_BUF_CTL(port)); |
2018 | |||
2019 | if (!(tmp & DDI_BUF_CTL_ENABLE)) | 2026 | if (!(tmp & DDI_BUF_CTL_ENABLE)) |
2020 | goto out; | 2027 | goto out; |
2021 | 2028 | ||
@@ -2023,44 +2030,58 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, | |||
2023 | tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); | 2030 | tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); |
2024 | 2031 | ||
2025 | switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { | 2032 | switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { |
2033 | default: | ||
2034 | MISSING_CASE(tmp & TRANS_DDI_EDP_INPUT_MASK); | ||
2035 | /* fallthrough */ | ||
2026 | case TRANS_DDI_EDP_INPUT_A_ON: | 2036 | case TRANS_DDI_EDP_INPUT_A_ON: |
2027 | case TRANS_DDI_EDP_INPUT_A_ONOFF: | 2037 | case TRANS_DDI_EDP_INPUT_A_ONOFF: |
2028 | *pipe = PIPE_A; | 2038 | *pipe_mask = BIT(PIPE_A); |
2029 | break; | 2039 | break; |
2030 | case TRANS_DDI_EDP_INPUT_B_ONOFF: | 2040 | case TRANS_DDI_EDP_INPUT_B_ONOFF: |
2031 | *pipe = PIPE_B; | 2041 | *pipe_mask = BIT(PIPE_B); |
2032 | break; | 2042 | break; |
2033 | case TRANS_DDI_EDP_INPUT_C_ONOFF: | 2043 | case TRANS_DDI_EDP_INPUT_C_ONOFF: |
2034 | *pipe = PIPE_C; | 2044 | *pipe_mask = BIT(PIPE_C); |
2035 | break; | 2045 | break; |
2036 | } | 2046 | } |
2037 | 2047 | ||
2038 | ret = true; | ||
2039 | |||
2040 | goto out; | 2048 | goto out; |
2041 | } | 2049 | } |
2042 | 2050 | ||
2051 | mst_pipe_mask = 0; | ||
2043 | for_each_pipe(dev_priv, p) { | 2052 | for_each_pipe(dev_priv, p) { |
2044 | enum transcoder cpu_transcoder = (enum transcoder) p; | 2053 | enum transcoder cpu_transcoder = (enum transcoder)p; |
2045 | 2054 | ||
2046 | tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); | 2055 | tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); |
2047 | 2056 | ||
2048 | if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) { | 2057 | if ((tmp & TRANS_DDI_PORT_MASK) != TRANS_DDI_SELECT_PORT(port)) |
2049 | if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == | 2058 | continue; |
2050 | TRANS_DDI_MODE_SELECT_DP_MST) | ||
2051 | goto out; | ||
2052 | 2059 | ||
2053 | *pipe = p; | 2060 | if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == |
2054 | ret = true; | 2061 | TRANS_DDI_MODE_SELECT_DP_MST) |
2062 | mst_pipe_mask |= BIT(p); | ||
2055 | 2063 | ||
2056 | goto out; | 2064 | *pipe_mask |= BIT(p); |
2057 | } | ||
2058 | } | 2065 | } |
2059 | 2066 | ||
2060 | DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port)); | 2067 | if (!*pipe_mask) |
2068 | DRM_DEBUG_KMS("No pipe for ddi port %c found\n", | ||
2069 | port_name(port)); | ||
2070 | |||
2071 | if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) { | ||
2072 | DRM_DEBUG_KMS("Multiple pipes for non DP-MST port %c (pipe_mask %02x)\n", | ||
2073 | port_name(port), *pipe_mask); | ||
2074 | *pipe_mask = BIT(ffs(*pipe_mask) - 1); | ||
2075 | } | ||
2076 | |||
2077 | if (mst_pipe_mask && mst_pipe_mask != *pipe_mask) | ||
2078 | DRM_DEBUG_KMS("Conflicting MST and non-MST encoders for port %c (pipe_mask %02x mst_pipe_mask %02x)\n", | ||
2079 | port_name(port), *pipe_mask, mst_pipe_mask); | ||
2080 | else | ||
2081 | *is_dp_mst = mst_pipe_mask; | ||
2061 | 2082 | ||
2062 | out: | 2083 | out: |
2063 | if (ret && IS_GEN9_LP(dev_priv)) { | 2084 | if (*pipe_mask && IS_GEN9_LP(dev_priv)) { |
2064 | tmp = I915_READ(BXT_PHY_CTL(port)); | 2085 | tmp = I915_READ(BXT_PHY_CTL(port)); |
2065 | if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK | | 2086 | if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK | |
2066 | BXT_PHY_LANE_POWERDOWN_ACK | | 2087 | BXT_PHY_LANE_POWERDOWN_ACK | |
@@ -2070,12 +2091,26 @@ out: | |||
2070 | } | 2091 | } |
2071 | 2092 | ||
2072 | intel_display_power_put(dev_priv, encoder->power_domain); | 2093 | intel_display_power_put(dev_priv, encoder->power_domain); |
2094 | } | ||
2073 | 2095 | ||
2074 | return ret; | 2096 | bool intel_ddi_get_hw_state(struct intel_encoder *encoder, |
2097 | enum pipe *pipe) | ||
2098 | { | ||
2099 | u8 pipe_mask; | ||
2100 | bool is_mst; | ||
2101 | |||
2102 | intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst); | ||
2103 | |||
2104 | if (is_mst || !pipe_mask) | ||
2105 | return false; | ||
2106 | |||
2107 | *pipe = ffs(pipe_mask) - 1; | ||
2108 | |||
2109 | return true; | ||
2075 | } | 2110 | } |
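intel_ddi_get_hw_state() becomes a thin wrapper over the new mask-based readout: MST can legitimately drive several pipes from one port, so the helper reports a pipe bitmask plus an MST flag, and the single-pipe API returns the lowest set bit after sanity-checking the mask. The two bit tricks involved, as a standalone sketch (ffs() and popcount standing in for the kernel's hweight8()):

    #include <stdint.h>
    #include <stdio.h>
    #include <strings.h>   /* ffs() */

    #define BIT(n) (1u << (n))

    /* Collapse a pipe bitmask to a single pipe index, mimicking the
     * readout above: reject empty masks, otherwise take the lowest pipe. */
    static int pipe_from_mask(uint8_t pipe_mask)
    {
        if (!pipe_mask)
            return -1;

        if (__builtin_popcount(pipe_mask) > 1)
            fprintf(stderr, "multiple pipes (mask %02x), picking lowest\n",
                    (unsigned)pipe_mask);

        return ffs(pipe_mask) - 1;  /* BIT(ffs(mask) - 1) keeps only this bit */
    }

    int main(void)
    {
        printf("%d\n", pipe_from_mask(BIT(1) | BIT(2))); /* -> 1 (pipe B) */
        return 0;
    }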
2076 | 2111 | ||
2077 | static inline enum intel_display_power_domain | 2112 | static inline enum intel_display_power_domain |
2078 | intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp) | 2113 | intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port) |
2079 | { | 2114 | { |
2080 | /* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with | 2115 | /* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with |
2081 | * DC states enabled at the same time, while for driver initiated AUX | 2116 | * DC states enabled at the same time, while for driver initiated AUX |
@@ -2089,13 +2124,14 @@ intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp) | |||
2089 | * Note that PSR is enabled only on Port A even though this function | 2124 | * Note that PSR is enabled only on Port A even though this function |
2090 | * returns the correct domain for other ports too. | 2125 | * returns the correct domain for other ports too. |
2091 | */ | 2126 | */ |
2092 | return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A : | 2127 | return dig_port->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A : |
2093 | intel_dp->aux_power_domain; | 2128 | intel_aux_power_domain(dig_port); |
2094 | } | 2129 | } |
2095 | 2130 | ||
2096 | static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder, | 2131 | static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder, |
2097 | struct intel_crtc_state *crtc_state) | 2132 | struct intel_crtc_state *crtc_state) |
2098 | { | 2133 | { |
2134 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
2099 | struct intel_digital_port *dig_port; | 2135 | struct intel_digital_port *dig_port; |
2100 | u64 domains; | 2136 | u64 domains; |
2101 | 2137 | ||
@@ -2110,12 +2146,13 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder, | |||
2110 | dig_port = enc_to_dig_port(&encoder->base); | 2146 | dig_port = enc_to_dig_port(&encoder->base); |
2111 | domains = BIT_ULL(dig_port->ddi_io_power_domain); | 2147 | domains = BIT_ULL(dig_port->ddi_io_power_domain); |
2112 | 2148 | ||
2113 | /* AUX power is only needed for (e)DP mode, not for HDMI. */ | 2149 | /* |
2114 | if (intel_crtc_has_dp_encoder(crtc_state)) { | 2150 | * AUX power is only needed for (e)DP mode, and for HDMI mode on TC |
2115 | struct intel_dp *intel_dp = &dig_port->dp; | 2151 | * ports. |
2116 | 2152 | */ | |
2117 | domains |= BIT_ULL(intel_ddi_main_link_aux_domain(intel_dp)); | 2153 | if (intel_crtc_has_dp_encoder(crtc_state) || |
2118 | } | 2154 | intel_port_is_tc(dev_priv, encoder->port)) |
2155 | domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port)); | ||
2119 | 2156 | ||
2120 | return domains; | 2157 | return domains; |
2121 | } | 2158 | } |
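The power-domain change widens the AUX requirement: Type-C ports need their AUX well powered even for HDMI, so the condition becomes "DP encoder or TC port". A sketch of building such a 64-bit domain mask, with invented domain numbers:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BIT_ULL(n) (1ULL << (n))

    enum { DOMAIN_DDI_IO = 5, DOMAIN_AUX = 17 };  /* invented values */

    static uint64_t get_power_domains(bool has_dp_encoder, bool is_tc_port)
    {
        uint64_t domains = BIT_ULL(DOMAIN_DDI_IO);

        /* AUX power is needed for (e)DP, and for HDMI on Type-C ports. */
        if (has_dp_encoder || is_tc_port)
            domains |= BIT_ULL(DOMAIN_AUX);
        return domains;
    }

    int main(void)
    {
        printf("0x%llx\n", (unsigned long long)get_power_domains(false, true));
        return 0;
    }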
@@ -2813,12 +2850,59 @@ void icl_unmap_plls_to_ports(struct drm_crtc *crtc, | |||
2813 | } | 2850 | } |
2814 | } | 2851 | } |
2815 | 2852 | ||
2853 | void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder) | ||
2854 | { | ||
2855 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
2856 | u32 val; | ||
2857 | enum port port = encoder->port; | ||
2858 | bool clk_enabled; | ||
2859 | |||
2860 | /* | ||
2861 | * In case of DP MST, we sanitize the primary encoder only, not the | ||
2862 | * virtual ones. | ||
2863 | */ | ||
2864 | if (encoder->type == INTEL_OUTPUT_DP_MST) | ||
2865 | return; | ||
2866 | |||
2867 | val = I915_READ(DPCLKA_CFGCR0_ICL); | ||
2868 | clk_enabled = !(val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)); | ||
2869 | |||
2870 | if (!encoder->base.crtc && intel_encoder_is_dp(encoder)) { | ||
2871 | u8 pipe_mask; | ||
2872 | bool is_mst; | ||
2873 | |||
2874 | intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst); | ||
2875 | /* | ||
2876 | * In the unlikely case that BIOS enables DP in MST mode, just | ||
2877 | * warn since our MST HW readout is incomplete. | ||
2878 | */ | ||
2879 | if (WARN_ON(is_mst)) | ||
2880 | return; | ||
2881 | } | ||
2882 | |||
2883 | if (clk_enabled == !!encoder->base.crtc) | ||
2884 | return; | ||
2885 | |||
2886 | /* | ||
2887 | * Punt for now on the case where the clock is disabled but the | ||
2888 | * encoder is enabled; something else is really broken then. | ||
2889 | */ | ||
2890 | if (WARN_ON(!clk_enabled)) | ||
2891 | return; | ||
2892 | |||
2893 | DRM_NOTE("Port %c is disabled but it has a mapped PLL, unmap it\n", | ||
2894 | port_name(port)); | ||
2895 | val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port); | ||
2896 | I915_WRITE(DPCLKA_CFGCR0_ICL, val); | ||
2897 | } | ||
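icl_sanitize_encoder_pll_mapping() reconciles whatever clock gating the BIOS left behind with the encoder state taken over at boot: a clock running with no owning CRTC gets gated off, while the inverse combination is only WARNed about, since something else must already be broken. The decision table as a tiny sketch:

    #include <stdbool.h>
    #include <stdio.h>

    enum action { NOTHING, GATE_CLOCK, WARN_BROKEN };

    /* Mirror of the sanitize logic above: state is consistent when the
     * clock-enabled bit matches encoder ownership; otherwise either gate
     * the stray clock or warn about the unrecoverable combination. */
    static enum action sanitize_pll_mapping(bool clk_enabled, bool has_crtc)
    {
        if (clk_enabled == has_crtc)
            return NOTHING;
        if (!clk_enabled)          /* encoder enabled, clock off: punt */
            return WARN_BROKEN;
        return GATE_CLOCK;         /* clock on, encoder unused: unmap it */
    }

    int main(void)
    {
        printf("%d\n", sanitize_pll_mapping(true, false)); /* GATE_CLOCK */
        return 0;
    }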
2898 | |||
2816 | static void intel_ddi_clk_select(struct intel_encoder *encoder, | 2899 | static void intel_ddi_clk_select(struct intel_encoder *encoder, |
2817 | const struct intel_shared_dpll *pll) | 2900 | const struct intel_crtc_state *crtc_state) |
2818 | { | 2901 | { |
2819 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 2902 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
2820 | enum port port = encoder->port; | 2903 | enum port port = encoder->port; |
2821 | uint32_t val; | 2904 | uint32_t val; |
2905 | const struct intel_shared_dpll *pll = crtc_state->shared_dpll; | ||
2822 | 2906 | ||
2823 | if (WARN_ON(!pll)) | 2907 | if (WARN_ON(!pll)) |
2824 | return; | 2908 | return; |
@@ -2828,7 +2912,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, | |||
2828 | if (IS_ICELAKE(dev_priv)) { | 2912 | if (IS_ICELAKE(dev_priv)) { |
2829 | if (!intel_port_is_combophy(dev_priv, port)) | 2913 | if (!intel_port_is_combophy(dev_priv, port)) |
2830 | I915_WRITE(DDI_CLK_SEL(port), | 2914 | I915_WRITE(DDI_CLK_SEL(port), |
2831 | icl_pll_to_ddi_pll_sel(encoder, pll)); | 2915 | icl_pll_to_ddi_pll_sel(encoder, crtc_state)); |
2832 | } else if (IS_CANNONLAKE(dev_priv)) { | 2916 | } else if (IS_CANNONLAKE(dev_priv)) { |
2833 | /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */ | 2917 | /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */ |
2834 | val = I915_READ(DPCLKA_CFGCR0); | 2918 | val = I915_READ(DPCLKA_CFGCR0); |
@@ -2881,6 +2965,137 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder) | |||
2881 | } | 2965 | } |
2882 | } | 2966 | } |
2883 | 2967 | ||
2968 | static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port) | ||
2969 | { | ||
2970 | struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); | ||
2971 | enum port port = dig_port->base.port; | ||
2972 | enum tc_port tc_port = intel_port_to_tc(dev_priv, port); | ||
2973 | i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) }; | ||
2974 | u32 val; | ||
2975 | int i; | ||
2976 | |||
2977 | if (tc_port == PORT_TC_NONE) | ||
2978 | return; | ||
2979 | |||
2980 | for (i = 0; i < ARRAY_SIZE(mg_regs); i++) { | ||
2981 | val = I915_READ(mg_regs[i]); | ||
2982 | val |= MG_DP_MODE_CFG_TR2PWR_GATING | | ||
2983 | MG_DP_MODE_CFG_TRPWR_GATING | | ||
2984 | MG_DP_MODE_CFG_CLNPWR_GATING | | ||
2985 | MG_DP_MODE_CFG_DIGPWR_GATING | | ||
2986 | MG_DP_MODE_CFG_GAONPWR_GATING; | ||
2987 | I915_WRITE(mg_regs[i], val); | ||
2988 | } | ||
2989 | |||
2990 | val = I915_READ(MG_MISC_SUS0(tc_port)); | ||
2991 | val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) | | ||
2992 | MG_MISC_SUS0_CFG_TR2PWR_GATING | | ||
2993 | MG_MISC_SUS0_CFG_CL2PWR_GATING | | ||
2994 | MG_MISC_SUS0_CFG_GAONPWR_GATING | | ||
2995 | MG_MISC_SUS0_CFG_TRPWR_GATING | | ||
2996 | MG_MISC_SUS0_CFG_CL1PWR_GATING | | ||
2997 | MG_MISC_SUS0_CFG_DGPWR_GATING; | ||
2998 | I915_WRITE(MG_MISC_SUS0(tc_port), val); | ||
2999 | } | ||
3000 | |||
3001 | static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port) | ||
3002 | { | ||
3003 | struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); | ||
3004 | enum port port = dig_port->base.port; | ||
3005 | enum tc_port tc_port = intel_port_to_tc(dev_priv, port); | ||
3006 | i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) }; | ||
3007 | u32 val; | ||
3008 | int i; | ||
3009 | |||
3010 | if (tc_port == PORT_TC_NONE) | ||
3011 | return; | ||
3012 | |||
3013 | for (i = 0; i < ARRAY_SIZE(mg_regs); i++) { | ||
3014 | val = I915_READ(mg_regs[i]); | ||
3015 | val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING | | ||
3016 | MG_DP_MODE_CFG_TRPWR_GATING | | ||
3017 | MG_DP_MODE_CFG_CLNPWR_GATING | | ||
3018 | MG_DP_MODE_CFG_DIGPWR_GATING | | ||
3019 | MG_DP_MODE_CFG_GAONPWR_GATING); | ||
3020 | I915_WRITE(mg_regs[i], val); | ||
3021 | } | ||
3022 | |||
3023 | val = I915_READ(MG_MISC_SUS0(tc_port)); | ||
3024 | val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK | | ||
3025 | MG_MISC_SUS0_CFG_TR2PWR_GATING | | ||
3026 | MG_MISC_SUS0_CFG_CL2PWR_GATING | | ||
3027 | MG_MISC_SUS0_CFG_GAONPWR_GATING | | ||
3028 | MG_MISC_SUS0_CFG_TRPWR_GATING | | ||
3029 | MG_MISC_SUS0_CFG_CL1PWR_GATING | | ||
3030 | MG_MISC_SUS0_CFG_DGPWR_GATING); | ||
3031 | I915_WRITE(MG_MISC_SUS0(tc_port), val); | ||
3032 | } | ||
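The enable/disable pair above is symmetric: the same set of MG_DP_MODE and MG_MISC_SUS0 gating bits is OR-ed in on one side and masked out on the other. When two functions share a bit list like this, hoisting it into a single mask keeps them from drifting apart; a sketch of that pattern with invented bit names:

    #include <stdint.h>
    #include <stdio.h>

    /* Invented gating bits standing in for MG_DP_MODE_CFG_*PWR_GATING. */
    #define PWR_GATING_A (1u << 0)
    #define PWR_GATING_B (1u << 1)
    #define PWR_GATING_C (1u << 2)

    #define ALL_PWR_GATING (PWR_GATING_A | PWR_GATING_B | PWR_GATING_C)

    /* One mask, two directions: read-modify-write the register image,
     * setting every gating bit on enable and clearing all on disable. */
    static uint32_t set_phy_clock_gating(uint32_t reg, int enable)
    {
        return enable ? (reg | ALL_PWR_GATING) : (reg & ~ALL_PWR_GATING);
    }

    int main(void)
    {
        uint32_t reg = 0x80;
        reg = set_phy_clock_gating(reg, 1);
        printf("0x%x\n", (unsigned)set_phy_clock_gating(reg, 0)); /* 0x80 */
        return 0;
    }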
3033 | |||
3034 | static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port) | ||
3035 | { | ||
3036 | struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); | ||
3037 | enum port port = intel_dig_port->base.port; | ||
3038 | enum tc_port tc_port = intel_port_to_tc(dev_priv, port); | ||
3039 | u32 ln0, ln1, lane_info; | ||
3040 | |||
3041 | if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT) | ||
3042 | return; | ||
3043 | |||
3044 | ln0 = I915_READ(MG_DP_MODE(port, 0)); | ||
3045 | ln1 = I915_READ(MG_DP_MODE(port, 1)); | ||
3046 | |||
3047 | switch (intel_dig_port->tc_type) { | ||
3048 | case TC_PORT_TYPEC: | ||
3049 | ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); | ||
3050 | ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); | ||
3051 | |||
3052 | lane_info = (I915_READ(PORT_TX_DFLEXDPSP) & | ||
3053 | DP_LANE_ASSIGNMENT_MASK(tc_port)) >> | ||
3054 | DP_LANE_ASSIGNMENT_SHIFT(tc_port); | ||
3055 | |||
3056 | switch (lane_info) { | ||
3057 | case 0x1: | ||
3058 | case 0x4: | ||
3059 | break; | ||
3060 | case 0x2: | ||
3061 | ln0 |= MG_DP_MODE_CFG_DP_X1_MODE; | ||
3062 | break; | ||
3063 | case 0x3: | ||
3064 | ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | | ||
3065 | MG_DP_MODE_CFG_DP_X2_MODE; | ||
3066 | break; | ||
3067 | case 0x8: | ||
3068 | ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; | ||
3069 | break; | ||
3070 | case 0xC: | ||
3071 | ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | | ||
3072 | MG_DP_MODE_CFG_DP_X2_MODE; | ||
3073 | break; | ||
3074 | case 0xF: | ||
3075 | ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | | ||
3076 | MG_DP_MODE_CFG_DP_X2_MODE; | ||
3077 | ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | | ||
3078 | MG_DP_MODE_CFG_DP_X2_MODE; | ||
3079 | break; | ||
3080 | default: | ||
3081 | MISSING_CASE(lane_info); | ||
3082 | } | ||
3083 | break; | ||
3084 | |||
3085 | case TC_PORT_LEGACY: | ||
3086 | ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE; | ||
3087 | ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE; | ||
3088 | break; | ||
3089 | |||
3090 | default: | ||
3091 | MISSING_CASE(intel_dig_port->tc_type); | ||
3092 | return; | ||
3093 | } | ||
3094 | |||
3095 | I915_WRITE(MG_DP_MODE(port, 0), ln0); | ||
3096 | I915_WRITE(MG_DP_MODE(port, 1), ln1); | ||
3097 | } | ||
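The TC_PORT_TYPEC branch decodes the 4-bit lane-assignment nibble read from PORT_TX_DFLEXDPSP into x1/x2 mode bits for the port's two PHY lane units; only the nibble values shown in the switch are valid assignments. The same decode in table form, standalone and simplified:

    #include <stdio.h>

    #define DP_X1_MODE (1u << 0)
    #define DP_X2_MODE (1u << 1)

    struct mg_mode { unsigned int ln0, ln1; };

    /* Table-driven version of the lane_info switch above: each valid
     * nibble maps to mode bits for PHY lane units 0 and 1. */
    static struct mg_mode decode_lane_info(unsigned int lane_info)
    {
        struct mg_mode m = { 0, 0 };

        switch (lane_info) {
        case 0x1: case 0x4:                                break;
        case 0x2: m.ln0 = DP_X1_MODE;                      break;
        case 0x3: m.ln0 = DP_X1_MODE | DP_X2_MODE;         break;
        case 0x8: m.ln1 = DP_X1_MODE;                      break;
        case 0xC: m.ln1 = DP_X1_MODE | DP_X2_MODE;         break;
        case 0xF: m.ln0 = m.ln1 = DP_X1_MODE | DP_X2_MODE; break;
        default:  fprintf(stderr, "bad lane_info %x\n", lane_info);
        }
        return m;
    }

    int main(void)
    {
        struct mg_mode m = decode_lane_info(0xC);
        printf("ln0=%x ln1=%x\n", m.ln0, m.ln1);
        return 0;
    }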
3098 | |||
2884 | static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, | 3099 | static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, |
2885 | const struct intel_crtc_state *crtc_state, | 3100 | const struct intel_crtc_state *crtc_state, |
2886 | const struct drm_connector_state *conn_state) | 3101 | const struct drm_connector_state *conn_state) |
@@ -2894,19 +3109,16 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, | |||
2894 | 3109 | ||
2895 | WARN_ON(is_mst && (port == PORT_A || port == PORT_E)); | 3110 | WARN_ON(is_mst && (port == PORT_A || port == PORT_E)); |
2896 | 3111 | ||
2897 | intel_display_power_get(dev_priv, | ||
2898 | intel_ddi_main_link_aux_domain(intel_dp)); | ||
2899 | |||
2900 | intel_dp_set_link_params(intel_dp, crtc_state->port_clock, | 3112 | intel_dp_set_link_params(intel_dp, crtc_state->port_clock, |
2901 | crtc_state->lane_count, is_mst); | 3113 | crtc_state->lane_count, is_mst); |
2902 | 3114 | ||
2903 | intel_edp_panel_on(intel_dp); | 3115 | intel_edp_panel_on(intel_dp); |
2904 | 3116 | ||
2905 | intel_ddi_clk_select(encoder, crtc_state->shared_dpll); | 3117 | intel_ddi_clk_select(encoder, crtc_state); |
2906 | 3118 | ||
2907 | intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); | 3119 | intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); |
2908 | 3120 | ||
2909 | icl_program_mg_dp_mode(intel_dp); | 3121 | icl_program_mg_dp_mode(dig_port); |
2910 | icl_disable_phy_clock_gating(dig_port); | 3122 | icl_disable_phy_clock_gating(dig_port); |
2911 | 3123 | ||
2912 | if (IS_ICELAKE(dev_priv)) | 3124 | if (IS_ICELAKE(dev_priv)) |
@@ -2944,10 +3156,13 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, | |||
2944 | struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); | 3156 | struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); |
2945 | 3157 | ||
2946 | intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); | 3158 | intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); |
2947 | intel_ddi_clk_select(encoder, crtc_state->shared_dpll); | 3159 | intel_ddi_clk_select(encoder, crtc_state); |
2948 | 3160 | ||
2949 | intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); | 3161 | intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); |
2950 | 3162 | ||
3163 | icl_program_mg_dp_mode(dig_port); | ||
3164 | icl_disable_phy_clock_gating(dig_port); | ||
3165 | |||
2951 | if (IS_ICELAKE(dev_priv)) | 3166 | if (IS_ICELAKE(dev_priv)) |
2952 | icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, | 3167 | icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, |
2953 | level, INTEL_OUTPUT_HDMI); | 3168 | level, INTEL_OUTPUT_HDMI); |
@@ -2958,12 +3173,14 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, | |||
2958 | else | 3173 | else |
2959 | intel_prepare_hdmi_ddi_buffers(encoder, level); | 3174 | intel_prepare_hdmi_ddi_buffers(encoder, level); |
2960 | 3175 | ||
3176 | icl_enable_phy_clock_gating(dig_port); | ||
3177 | |||
2961 | if (IS_GEN9_BC(dev_priv)) | 3178 | if (IS_GEN9_BC(dev_priv)) |
2962 | skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI); | 3179 | skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI); |
2963 | 3180 | ||
2964 | intel_ddi_enable_pipe_clock(crtc_state); | 3181 | intel_ddi_enable_pipe_clock(crtc_state); |
2965 | 3182 | ||
2966 | intel_dig_port->set_infoframes(&encoder->base, | 3183 | intel_dig_port->set_infoframes(encoder, |
2967 | crtc_state->has_infoframe, | 3184 | crtc_state->has_infoframe, |
2968 | crtc_state, conn_state); | 3185 | crtc_state, conn_state); |
2969 | } | 3186 | } |
@@ -2993,10 +3210,22 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder, | |||
2993 | 3210 | ||
2994 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); | 3211 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); |
2995 | 3212 | ||
2996 | if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) | 3213 | if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { |
2997 | intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state); | 3214 | intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state); |
2998 | else | 3215 | } else { |
3216 | struct intel_lspcon *lspcon = | ||
3217 | enc_to_intel_lspcon(&encoder->base); | ||
3218 | |||
2999 | intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state); | 3219 | intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state); |
3220 | if (lspcon->active) { | ||
3221 | struct intel_digital_port *dig_port = | ||
3222 | enc_to_dig_port(&encoder->base); | ||
3223 | |||
3224 | dig_port->set_infoframes(encoder, | ||
3225 | crtc_state->has_infoframe, | ||
3226 | crtc_state, conn_state); | ||
3227 | } | ||
3228 | } | ||
3000 | } | 3229 | } |
3001 | 3230 | ||
3002 | static void intel_disable_ddi_buf(struct intel_encoder *encoder) | 3231 | static void intel_disable_ddi_buf(struct intel_encoder *encoder) |
@@ -3049,9 +3278,6 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, | |||
3049 | intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain); | 3278 | intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain); |
3050 | 3279 | ||
3051 | intel_ddi_clk_disable(encoder); | 3280 | intel_ddi_clk_disable(encoder); |
3052 | |||
3053 | intel_display_power_put(dev_priv, | ||
3054 | intel_ddi_main_link_aux_domain(intel_dp)); | ||
3055 | } | 3281 | } |
3056 | 3282 | ||
3057 | static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, | 3283 | static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, |
@@ -3062,7 +3288,7 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, | |||
3062 | struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); | 3288 | struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); |
3063 | struct intel_hdmi *intel_hdmi = &dig_port->hdmi; | 3289 | struct intel_hdmi *intel_hdmi = &dig_port->hdmi; |
3064 | 3290 | ||
3065 | dig_port->set_infoframes(&encoder->base, false, | 3291 | dig_port->set_infoframes(encoder, false, |
3066 | old_crtc_state, old_conn_state); | 3292 | old_crtc_state, old_conn_state); |
3067 | 3293 | ||
3068 | intel_ddi_disable_pipe_clock(old_crtc_state); | 3294 | intel_ddi_disable_pipe_clock(old_crtc_state); |
@@ -3154,6 +3380,26 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder, | |||
3154 | intel_audio_codec_enable(encoder, crtc_state, conn_state); | 3380 | intel_audio_codec_enable(encoder, crtc_state, conn_state); |
3155 | } | 3381 | } |
3156 | 3382 | ||
3383 | static i915_reg_t | ||
3384 | gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv, | ||
3385 | enum port port) | ||
3386 | { | ||
3387 | static const i915_reg_t regs[] = { | ||
3388 | [PORT_A] = CHICKEN_TRANS_EDP, | ||
3389 | [PORT_B] = CHICKEN_TRANS_A, | ||
3390 | [PORT_C] = CHICKEN_TRANS_B, | ||
3391 | [PORT_D] = CHICKEN_TRANS_C, | ||
3392 | [PORT_E] = CHICKEN_TRANS_A, | ||
3393 | }; | ||
3394 | |||
3395 | WARN_ON(INTEL_GEN(dev_priv) < 9); | ||
3396 | |||
3397 | if (WARN_ON(port < PORT_A || port > PORT_E)) | ||
3398 | port = PORT_A; | ||
3399 | |||
3400 | return regs[port]; | ||
3401 | } | ||
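The open-coded port-to-transcoder table from intel_enable_ddi_hdmi() moves into a helper that returns the chicken register directly and, after a WARN, clamps an out-of-range port to PORT_A so a bogus value cannot index past the array. The lookup-with-clamp pattern, sketched with strings standing in for register handles:

    #include <stdio.h>

    enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_MAX };

    /* Static lookup with defensive clamping: complain about a bad index,
     * then fall back to a safe entry rather than reading out of bounds. */
    static const char *chicken_reg_by_port(enum port port)
    {
        static const char *const regs[PORT_MAX] = {
            [PORT_A] = "CHICKEN_TRANS_EDP",
            [PORT_B] = "CHICKEN_TRANS_A",
            [PORT_C] = "CHICKEN_TRANS_B",
            [PORT_D] = "CHICKEN_TRANS_C",
            [PORT_E] = "CHICKEN_TRANS_A",
        };

        if (port < PORT_A || port > PORT_E) {
            fprintf(stderr, "bad port %d, clamping to A\n", (int)port);
            port = PORT_A;
        }
        return regs[port];
    }

    int main(void)
    {
        printf("%s\n", chicken_reg_by_port(PORT_E));
        return 0;
    }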
3402 | |||
3157 | static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, | 3403 | static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, |
3158 | const struct intel_crtc_state *crtc_state, | 3404 | const struct intel_crtc_state *crtc_state, |
3159 | const struct drm_connector_state *conn_state) | 3405 | const struct drm_connector_state *conn_state) |
@@ -3177,17 +3423,10 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, | |||
3177 | * the bits affect a specific DDI port rather than | 3423 | * the bits affect a specific DDI port rather than |
3178 | * a specific transcoder. | 3424 | * a specific transcoder. |
3179 | */ | 3425 | */ |
3180 | static const enum transcoder port_to_transcoder[] = { | 3426 | i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port); |
3181 | [PORT_A] = TRANSCODER_EDP, | ||
3182 | [PORT_B] = TRANSCODER_A, | ||
3183 | [PORT_C] = TRANSCODER_B, | ||
3184 | [PORT_D] = TRANSCODER_C, | ||
3185 | [PORT_E] = TRANSCODER_A, | ||
3186 | }; | ||
3187 | enum transcoder transcoder = port_to_transcoder[port]; | ||
3188 | u32 val; | 3427 | u32 val; |
3189 | 3428 | ||
3190 | val = I915_READ(CHICKEN_TRANS(transcoder)); | 3429 | val = I915_READ(reg); |
3191 | 3430 | ||
3192 | if (port == PORT_E) | 3431 | if (port == PORT_E) |
3193 | val |= DDIE_TRAINING_OVERRIDE_ENABLE | | 3432 | val |= DDIE_TRAINING_OVERRIDE_ENABLE | |
@@ -3196,8 +3435,8 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, | |||
3196 | val |= DDI_TRAINING_OVERRIDE_ENABLE | | 3435 | val |= DDI_TRAINING_OVERRIDE_ENABLE | |
3197 | DDI_TRAINING_OVERRIDE_VALUE; | 3436 | DDI_TRAINING_OVERRIDE_VALUE; |
3198 | 3437 | ||
3199 | I915_WRITE(CHICKEN_TRANS(transcoder), val); | 3438 | I915_WRITE(reg, val); |
3200 | POSTING_READ(CHICKEN_TRANS(transcoder)); | 3439 | POSTING_READ(reg); |
3201 | 3440 | ||
3202 | udelay(1); | 3441 | udelay(1); |
3203 | 3442 | ||
@@ -3208,7 +3447,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, | |||
3208 | val &= ~(DDI_TRAINING_OVERRIDE_ENABLE | | 3447 | val &= ~(DDI_TRAINING_OVERRIDE_ENABLE | |
3209 | DDI_TRAINING_OVERRIDE_VALUE); | 3448 | DDI_TRAINING_OVERRIDE_VALUE); |
3210 | 3449 | ||
3211 | I915_WRITE(CHICKEN_TRANS(transcoder), val); | 3450 | I915_WRITE(reg, val); |
3212 | } | 3451 | } |
3213 | 3452 | ||
3214 | /* In HDMI/DVI mode, the port width, and swing/emphasis values | 3453 | /* In HDMI/DVI mode, the port width, and swing/emphasis values |
@@ -3282,13 +3521,76 @@ static void intel_disable_ddi(struct intel_encoder *encoder, | |||
3282 | intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state); | 3521 | intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state); |
3283 | } | 3522 | } |
3284 | 3523 | ||
3285 | static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder, | 3524 | static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder, |
3286 | const struct intel_crtc_state *pipe_config, | 3525 | const struct intel_crtc_state *pipe_config, |
3287 | const struct drm_connector_state *conn_state) | 3526 | enum port port) |
3527 | { | ||
3528 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
3529 | struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); | ||
3530 | enum tc_port tc_port = intel_port_to_tc(dev_priv, port); | ||
3531 | u32 val = I915_READ(PORT_TX_DFLEXDPMLE1); | ||
3532 | bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; | ||
3533 | |||
3534 | val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port); | ||
3535 | switch (pipe_config->lane_count) { | ||
3536 | case 1: | ||
3537 | val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) : | ||
3538 | DFLEXDPMLE1_DPMLETC_ML0(tc_port); | ||
3539 | break; | ||
3540 | case 2: | ||
3541 | val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) : | ||
3542 | DFLEXDPMLE1_DPMLETC_ML1_0(tc_port); | ||
3543 | break; | ||
3544 | case 4: | ||
3545 | val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port); | ||
3546 | break; | ||
3547 | default: | ||
3548 | MISSING_CASE(pipe_config->lane_count); | ||
3549 | } | ||
3550 | I915_WRITE(PORT_TX_DFLEXDPMLE1, val); | ||
3551 | } | ||
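intel_ddi_set_fia_lane_count() tells the Type-C FIA which main-link lanes the connection owns: one, two, or all four, with lane reversal mirroring the x1/x2 selection to the opposite end of the four-lane bundle (ML3 instead of ML0, ML3_2 instead of ML1_0). A sketch of the selection that returns the lane group symbolically:

    #include <stdbool.h>
    #include <stdio.h>

    /* Pick the FIA main-link lane group for a Type-C DP connection,
     * mirroring the switch above: reversal flips which end of the
     * four-lane bundle the x1/x2 configuration occupies. */
    static const char *fia_lane_group(int lane_count, bool lane_reversal)
    {
        switch (lane_count) {
        case 1:  return lane_reversal ? "ML3"   : "ML0";
        case 2:  return lane_reversal ? "ML3_2" : "ML1_0";
        case 4:  return "ML3_0";          /* reversal is moot at x4 */
        default: return "invalid";
        }
    }

    int main(void)
    {
        printf("%s\n", fia_lane_group(2, true)); /* ML3_2 */
        return 0;
    }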
3552 | |||
3553 | static void | ||
3554 | intel_ddi_pre_pll_enable(struct intel_encoder *encoder, | ||
3555 | const struct intel_crtc_state *crtc_state, | ||
3556 | const struct drm_connector_state *conn_state) | ||
3557 | { | ||
3558 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
3559 | struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); | ||
3560 | enum port port = encoder->port; | ||
3561 | |||
3562 | if (intel_crtc_has_dp_encoder(crtc_state) || | ||
3563 | intel_port_is_tc(dev_priv, encoder->port)) | ||
3564 | intel_display_power_get(dev_priv, | ||
3565 | intel_ddi_main_link_aux_domain(dig_port)); | ||
3566 | |||
3567 | if (IS_GEN9_LP(dev_priv)) | ||
3568 | bxt_ddi_phy_set_lane_optim_mask(encoder, | ||
3569 | crtc_state->lane_lat_optim_mask); | ||
3570 | |||
3571 | /* | ||
3572 | * Program the lane count for static/dynamic connections on Type-C ports. | ||
3573 | * Skip this step for TBT. | ||
3574 | */ | ||
3575 | if (dig_port->tc_type == TC_PORT_UNKNOWN || | ||
3576 | dig_port->tc_type == TC_PORT_TBT) | ||
3577 | return; | ||
3578 | |||
3579 | intel_ddi_set_fia_lane_count(encoder, crtc_state, port); | ||
3580 | } | ||
3581 | |||
3582 | static void | ||
3583 | intel_ddi_post_pll_disable(struct intel_encoder *encoder, | ||
3584 | const struct intel_crtc_state *crtc_state, | ||
3585 | const struct drm_connector_state *conn_state) | ||
3288 | { | 3586 | { |
3289 | uint8_t mask = pipe_config->lane_lat_optim_mask; | 3587 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
3588 | struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); | ||
3290 | 3589 | ||
3291 | bxt_ddi_phy_set_lane_optim_mask(encoder, mask); | 3590 | if (intel_crtc_has_dp_encoder(crtc_state) || |
3591 | intel_port_is_tc(dev_priv, encoder->port)) | ||
3592 | intel_display_power_put(dev_priv, | ||
3593 | intel_ddi_main_link_aux_domain(dig_port)); | ||
3292 | } | 3594 | } |
3293 | 3595 | ||
3294 | void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) | 3596 | void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) |
@@ -3353,10 +3655,10 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, | |||
3353 | void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, | 3655 | void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, |
3354 | struct intel_crtc_state *crtc_state) | 3656 | struct intel_crtc_state *crtc_state) |
3355 | { | 3657 | { |
3356 | if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000) | 3658 | if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000) |
3357 | crtc_state->min_voltage_level = 2; | ||
3358 | else if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000) | ||
3359 | crtc_state->min_voltage_level = 1; | 3659 | crtc_state->min_voltage_level = 1; |
3660 | else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000) | ||
3661 | crtc_state->min_voltage_level = 2; | ||
3360 | } | 3662 | } |
3361 | 3663 | ||
3362 | void intel_ddi_get_config(struct intel_encoder *encoder, | 3664 | void intel_ddi_get_config(struct intel_encoder *encoder, |
@@ -3406,7 +3708,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder, | |||
3406 | pipe_config->has_hdmi_sink = true; | 3708 | pipe_config->has_hdmi_sink = true; |
3407 | intel_dig_port = enc_to_dig_port(&encoder->base); | 3709 | intel_dig_port = enc_to_dig_port(&encoder->base); |
3408 | 3710 | ||
3409 | if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config)) | 3711 | if (intel_dig_port->infoframe_enabled(encoder, pipe_config)) |
3410 | pipe_config->has_infoframe = true; | 3712 | pipe_config->has_infoframe = true; |
3411 | 3713 | ||
3412 | if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) == | 3714 | if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) == |
@@ -3767,6 +4069,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) | |||
3767 | struct intel_encoder *intel_encoder; | 4069 | struct intel_encoder *intel_encoder; |
3768 | struct drm_encoder *encoder; | 4070 | struct drm_encoder *encoder; |
3769 | bool init_hdmi, init_dp, init_lspcon = false; | 4071 | bool init_hdmi, init_dp, init_lspcon = false; |
4072 | enum pipe pipe; | ||
3770 | 4073 | ||
3771 | 4074 | ||
3772 | init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi || | 4075 | init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi || |
@@ -3805,8 +4108,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) | |||
3805 | intel_encoder->compute_output_type = intel_ddi_compute_output_type; | 4108 | intel_encoder->compute_output_type = intel_ddi_compute_output_type; |
3806 | intel_encoder->compute_config = intel_ddi_compute_config; | 4109 | intel_encoder->compute_config = intel_ddi_compute_config; |
3807 | intel_encoder->enable = intel_enable_ddi; | 4110 | intel_encoder->enable = intel_enable_ddi; |
3808 | if (IS_GEN9_LP(dev_priv)) | 4111 | intel_encoder->pre_pll_enable = intel_ddi_pre_pll_enable; |
3809 | intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable; | 4112 | intel_encoder->post_pll_disable = intel_ddi_post_pll_disable; |
3810 | intel_encoder->pre_enable = intel_ddi_pre_enable; | 4113 | intel_encoder->pre_enable = intel_ddi_pre_enable; |
3811 | intel_encoder->disable = intel_disable_ddi; | 4114 | intel_encoder->disable = intel_disable_ddi; |
3812 | intel_encoder->post_disable = intel_ddi_post_disable; | 4115 | intel_encoder->post_disable = intel_ddi_post_disable; |
@@ -3817,8 +4120,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) | |||
3817 | intel_encoder->type = INTEL_OUTPUT_DDI; | 4120 | intel_encoder->type = INTEL_OUTPUT_DDI; |
3818 | intel_encoder->power_domain = intel_port_to_power_domain(port); | 4121 | intel_encoder->power_domain = intel_port_to_power_domain(port); |
3819 | intel_encoder->port = port; | 4122 | intel_encoder->port = port; |
3820 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); | ||
3821 | intel_encoder->cloneable = 0; | 4123 | intel_encoder->cloneable = 0; |
4124 | for_each_pipe(dev_priv, pipe) | ||
4125 | intel_encoder->crtc_mask |= BIT(pipe); | ||
3822 | 4126 | ||
3823 | if (INTEL_GEN(dev_priv) >= 11) | 4127 | if (INTEL_GEN(dev_priv) >= 11) |
3824 | intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & | 4128 | intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & |
@@ -3828,6 +4132,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) | |||
3828 | (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES); | 4132 | (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES); |
3829 | intel_dig_port->dp.output_reg = INVALID_MMIO_REG; | 4133 | intel_dig_port->dp.output_reg = INVALID_MMIO_REG; |
3830 | intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port); | 4134 | intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port); |
4135 | intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); | ||
3831 | 4136 | ||
3832 | switch (port) { | 4137 | switch (port) { |
3833 | case PORT_A: | 4138 | case PORT_A: |
@@ -3858,8 +4163,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) | |||
3858 | MISSING_CASE(port); | 4163 | MISSING_CASE(port); |
3859 | } | 4164 | } |
3860 | 4165 | ||
3861 | intel_infoframe_init(intel_dig_port); | ||
3862 | |||
3863 | if (init_dp) { | 4166 | if (init_dp) { |
3864 | if (!intel_ddi_init_dp_connector(intel_dig_port)) | 4167 | if (!intel_ddi_init_dp_connector(intel_dig_port)) |
3865 | goto err; | 4168 | goto err; |
@@ -3888,6 +4191,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) | |||
3888 | port_name(port)); | 4191 | port_name(port)); |
3889 | } | 4192 | } |
3890 | 4193 | ||
4194 | intel_infoframe_init(intel_dig_port); | ||
3891 | return; | 4195 | return; |
3892 | 4196 | ||
3893 | err: | 4197 | err: |
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 0ef0c6448d53..ceecb5bd5226 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c | |||
@@ -474,7 +474,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) | |||
474 | u8 eu_disabled_mask; | 474 | u8 eu_disabled_mask; |
475 | u32 n_disabled; | 475 | u32 n_disabled; |
476 | 476 | ||
477 | if (!(sseu->subslice_mask[ss] & BIT(ss))) | 477 | if (!(sseu->subslice_mask[s] & BIT(ss))) |
478 | /* skip disabled subslice */ | 478 | /* skip disabled subslice */ |
479 | continue; | 479 | continue; |
480 | 480 | ||
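The one-character change above fixes a real indexing bug: subslice_mask[] is indexed by slice, with one bit per subslice inside each entry, so testing subslice_mask[ss] & BIT(ss) conflated the two indices and could skip live subslices. A sketch of the corrected iteration:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))
    #define MAX_SLICES 3
    #define MAX_SUBSLICES 4

    /* Count enabled subslices: the outer index selects the slice's mask,
     * the inner bit selects the subslice within it. The bug used the
     * subslice index for both. */
    static int count_subslices(const uint8_t subslice_mask[MAX_SLICES])
    {
        int s, ss, n = 0;

        for (s = 0; s < MAX_SLICES; s++)
            for (ss = 0; ss < MAX_SUBSLICES; ss++)
                if (subslice_mask[s] & BIT(ss)) /* was: subslice_mask[ss] */
                    n++;
        return n;
    }

    int main(void)
    {
        uint8_t mask[MAX_SLICES] = { 0xF, 0x3, 0x0 };
        printf("%d\n", count_subslices(mask)); /* 6 */
        return 0;
    }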
@@ -744,27 +744,30 @@ void intel_device_info_runtime_init(struct intel_device_info *info) | |||
744 | if (INTEL_GEN(dev_priv) >= 10) { | 744 | if (INTEL_GEN(dev_priv) >= 10) { |
745 | for_each_pipe(dev_priv, pipe) | 745 | for_each_pipe(dev_priv, pipe) |
746 | info->num_scalers[pipe] = 2; | 746 | info->num_scalers[pipe] = 2; |
747 | } else if (INTEL_GEN(dev_priv) == 9) { | 747 | } else if (IS_GEN9(dev_priv)) { |
748 | info->num_scalers[PIPE_A] = 2; | 748 | info->num_scalers[PIPE_A] = 2; |
749 | info->num_scalers[PIPE_B] = 2; | 749 | info->num_scalers[PIPE_B] = 2; |
750 | info->num_scalers[PIPE_C] = 1; | 750 | info->num_scalers[PIPE_C] = 1; |
751 | } | 751 | } |
752 | 752 | ||
753 | BUILD_BUG_ON(I915_NUM_ENGINES > | 753 | BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t)); |
754 | sizeof(intel_ring_mask_t) * BITS_PER_BYTE); | ||
755 | 754 | ||
756 | /* | 755 | if (IS_GEN11(dev_priv)) |
757 | * Skylake and Broxton currently don't expose the topmost plane as its | 756 | for_each_pipe(dev_priv, pipe) |
758 | * use is exclusive with the legacy cursor and we only want to expose | 757 | info->num_sprites[pipe] = 6; |
759 | * one of those, not both. Until we can safely expose the topmost plane | 758 | else if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv)) |
760 | * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported, | ||
761 | * we don't expose the topmost plane at all to prevent ABI breakage | ||
762 | * down the line. | ||
763 | */ | ||
764 | if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv)) | ||
765 | for_each_pipe(dev_priv, pipe) | 759 | for_each_pipe(dev_priv, pipe) |
766 | info->num_sprites[pipe] = 3; | 760 | info->num_sprites[pipe] = 3; |
767 | else if (IS_BROXTON(dev_priv)) { | 761 | else if (IS_BROXTON(dev_priv)) { |
762 | /* | ||
763 | * Skylake and Broxton currently don't expose the topmost plane as its | ||
764 | * use is exclusive with the legacy cursor and we only want to expose | ||
765 | * one of those, not both. Until we can safely expose the topmost plane | ||
766 | * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported, | ||
767 | * we don't expose the topmost plane at all to prevent ABI breakage | ||
768 | * down the line. | ||
769 | */ | ||
770 | |||
768 | info->num_sprites[PIPE_A] = 2; | 771 | info->num_sprites[PIPE_A] = 2; |
769 | info->num_sprites[PIPE_B] = 2; | 772 | info->num_sprites[PIPE_B] = 2; |
770 | info->num_sprites[PIPE_C] = 1; | 773 | info->num_sprites[PIPE_C] = 1; |
@@ -844,13 +847,18 @@ void intel_device_info_runtime_init(struct intel_device_info *info) | |||
844 | cherryview_sseu_info_init(dev_priv); | 847 | cherryview_sseu_info_init(dev_priv); |
845 | else if (IS_BROADWELL(dev_priv)) | 848 | else if (IS_BROADWELL(dev_priv)) |
846 | broadwell_sseu_info_init(dev_priv); | 849 | broadwell_sseu_info_init(dev_priv); |
847 | else if (INTEL_GEN(dev_priv) == 9) | 850 | else if (IS_GEN9(dev_priv)) |
848 | gen9_sseu_info_init(dev_priv); | 851 | gen9_sseu_info_init(dev_priv); |
849 | else if (INTEL_GEN(dev_priv) == 10) | 852 | else if (IS_GEN10(dev_priv)) |
850 | gen10_sseu_info_init(dev_priv); | 853 | gen10_sseu_info_init(dev_priv); |
851 | else if (INTEL_GEN(dev_priv) >= 11) | 854 | else if (INTEL_GEN(dev_priv) >= 11) |
852 | gen11_sseu_info_init(dev_priv); | 855 | gen11_sseu_info_init(dev_priv); |
853 | 856 | ||
857 | if (IS_GEN6(dev_priv) && intel_vtd_active()) { | ||
858 | DRM_INFO("Disabling ppGTT for VT-d support\n"); | ||
859 | info->ppgtt = INTEL_PPGTT_NONE; | ||
860 | } | ||
861 | |||
854 | /* Initialize command stream timestamp frequency */ | 862 | /* Initialize command stream timestamp frequency */ |
855 | info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv); | 863 | info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv); |
856 | } | 864 | } |
@@ -872,40 +880,37 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps, | |||
872 | void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) | 880 | void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) |
873 | { | 881 | { |
874 | struct intel_device_info *info = mkwrite_device_info(dev_priv); | 882 | struct intel_device_info *info = mkwrite_device_info(dev_priv); |
875 | u8 vdbox_disable, vebox_disable; | ||
876 | u32 media_fuse; | 883 | u32 media_fuse; |
877 | int i; | 884 | unsigned int i; |
878 | 885 | ||
879 | if (INTEL_GEN(dev_priv) < 11) | 886 | if (INTEL_GEN(dev_priv) < 11) |
880 | return; | 887 | return; |
881 | 888 | ||
882 | media_fuse = I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE); | 889 | media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE); |
883 | 890 | ||
884 | vdbox_disable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; | 891 | info->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; |
885 | vebox_disable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> | 892 | info->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> |
886 | GEN11_GT_VEBOX_DISABLE_SHIFT; | 893 | GEN11_GT_VEBOX_DISABLE_SHIFT; |
887 | 894 | ||
888 | DRM_DEBUG_DRIVER("vdbox disable: %04x\n", vdbox_disable); | 895 | DRM_DEBUG_DRIVER("vdbox enable: %04x\n", info->vdbox_enable); |
889 | for (i = 0; i < I915_MAX_VCS; i++) { | 896 | for (i = 0; i < I915_MAX_VCS; i++) { |
890 | if (!HAS_ENGINE(dev_priv, _VCS(i))) | 897 | if (!HAS_ENGINE(dev_priv, _VCS(i))) |
891 | continue; | 898 | continue; |
892 | 899 | ||
893 | if (!(BIT(i) & vdbox_disable)) | 900 | if (!(BIT(i) & info->vdbox_enable)) { |
894 | continue; | 901 | info->ring_mask &= ~ENGINE_MASK(_VCS(i)); |
895 | 902 | DRM_DEBUG_DRIVER("vcs%u fused off\n", i); | |
896 | info->ring_mask &= ~ENGINE_MASK(_VCS(i)); | 903 | } |
897 | DRM_DEBUG_DRIVER("vcs%u fused off\n", i); | ||
898 | } | 904 | } |
899 | 905 | ||
900 | DRM_DEBUG_DRIVER("vebox disable: %04x\n", vebox_disable); | 906 | DRM_DEBUG_DRIVER("vebox enable: %04x\n", info->vebox_enable); |
901 | for (i = 0; i < I915_MAX_VECS; i++) { | 907 | for (i = 0; i < I915_MAX_VECS; i++) { |
902 | if (!HAS_ENGINE(dev_priv, _VECS(i))) | 908 | if (!HAS_ENGINE(dev_priv, _VECS(i))) |
903 | continue; | 909 | continue; |
904 | 910 | ||
905 | if (!(BIT(i) & vebox_disable)) | 911 | if (!(BIT(i) & info->vebox_enable)) { |
906 | continue; | 912 | info->ring_mask &= ~ENGINE_MASK(_VECS(i)); |
907 | 913 | DRM_DEBUG_DRIVER("vecs%u fused off\n", i); | |
908 | info->ring_mask &= ~ENGINE_MASK(_VECS(i)); | 914 | } |
909 | DRM_DEBUG_DRIVER("vecs%u fused off\n", i); | ||
910 | } | 915 | } |
911 | } | 916 | } |
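Complementing the fuse register once up front (~I915_READ) turns the two disable fields into enable masks worth caching in intel_device_info as vdbox_enable/vebox_enable, and lets both loops share one shape: engine present but enable bit clear means strip it from ring_mask. The readout sketched standalone, with an invented field layout:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))
    #define VDBOX_MASK  0x0F        /* invented layout: low nibble */

    /* Strip fused-off video engines from a ring mask: the fuse register
     * stores *disable* bits, so invert once and treat it as enables. */
    static uint32_t apply_media_fuses(uint32_t ring_mask, uint32_t fuse_reg,
                                      int num_vcs)
    {
        uint32_t vdbox_enable = ~fuse_reg & VDBOX_MASK;
        int i;

        for (i = 0; i < num_vcs; i++)
            if (!(vdbox_enable & BIT(i)))
                ring_mask &= ~BIT(8 + i);  /* invented VCS engine bits */
        return ring_mask;
    }

    int main(void)
    {
        /* VCS1 (fuse bit 1) fused off -> its engine bit disappears. */
        printf("0x%x\n", (unsigned)apply_media_fuses(0xF00, BIT(1), 4));
        return 0;
    }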
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 6eecd64734d5..88f97210dc49 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h | |||
@@ -25,6 +25,8 @@ | |||
25 | #ifndef _INTEL_DEVICE_INFO_H_ | 25 | #ifndef _INTEL_DEVICE_INFO_H_ |
26 | #define _INTEL_DEVICE_INFO_H_ | 26 | #define _INTEL_DEVICE_INFO_H_ |
27 | 27 | ||
28 | #include <uapi/drm/i915_drm.h> | ||
29 | |||
28 | #include "intel_display.h" | 30 | #include "intel_display.h" |
29 | 31 | ||
30 | struct drm_printer; | 32 | struct drm_printer; |
@@ -74,21 +76,25 @@ enum intel_platform { | |||
74 | INTEL_MAX_PLATFORMS | 76 | INTEL_MAX_PLATFORMS |
75 | }; | 77 | }; |
76 | 78 | ||
79 | enum intel_ppgtt { | ||
80 | INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE, | ||
81 | INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING, | ||
82 | INTEL_PPGTT_FULL = I915_GEM_PPGTT_FULL, | ||
83 | INTEL_PPGTT_FULL_4LVL, | ||
84 | }; | ||
85 | |||
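The new enum collapses three independent has_*_ppgtt flags into one ordered capability level, and pins the first three values to the uapi I915_GEM_PPGTT_* constants so driver-internal and userspace-visible numbers cannot drift. Ordered levels let callers use >= comparisons instead of flag combinations; a sketch of that idiom (the numeric values here are assumptions, not the uapi constants):

    #include <stdio.h>

    /* Ordered capability ladder: each level implies the ones below it,
     * so ">= PPGTT_ALIASING" replaces the old has_aliasing_ppgtt check. */
    enum ppgtt_level {
        PPGTT_NONE,
        PPGTT_ALIASING,
        PPGTT_FULL,
        PPGTT_FULL_4LVL,
    };

    static int supports_full_ppgtt(enum ppgtt_level l)
    {
        return l >= PPGTT_FULL;
    }

    int main(void)
    {
        printf("%d\n", supports_full_ppgtt(PPGTT_FULL_4LVL)); /* 1 */
        return 0;
    }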
77 | #define DEV_INFO_FOR_EACH_FLAG(func) \ | 86 | #define DEV_INFO_FOR_EACH_FLAG(func) \ |
78 | func(is_mobile); \ | 87 | func(is_mobile); \ |
79 | func(is_lp); \ | 88 | func(is_lp); \ |
80 | func(is_alpha_support); \ | 89 | func(is_alpha_support); \ |
81 | /* Keep has_* in alphabetical order */ \ | 90 | /* Keep has_* in alphabetical order */ \ |
82 | func(has_64bit_reloc); \ | 91 | func(has_64bit_reloc); \ |
83 | func(has_aliasing_ppgtt); \ | ||
84 | func(has_csr); \ | 92 | func(has_csr); \ |
85 | func(has_ddi); \ | 93 | func(has_ddi); \ |
86 | func(has_dp_mst); \ | 94 | func(has_dp_mst); \ |
87 | func(has_reset_engine); \ | 95 | func(has_reset_engine); \ |
88 | func(has_fbc); \ | 96 | func(has_fbc); \ |
89 | func(has_fpga_dbg); \ | 97 | func(has_fpga_dbg); \ |
90 | func(has_full_ppgtt); \ | ||
91 | func(has_full_48bit_ppgtt); \ | ||
92 | func(has_gmch_display); \ | 98 | func(has_gmch_display); \ |
93 | func(has_guc); \ | 99 | func(has_guc); \ |
94 | func(has_guc_ct); \ | 100 | func(has_guc_ct); \ |
@@ -118,7 +124,7 @@ enum intel_platform { | |||
118 | 124 | ||
119 | struct sseu_dev_info { | 125 | struct sseu_dev_info { |
120 | u8 slice_mask; | 126 | u8 slice_mask; |
121 | u8 subslice_mask[GEN_MAX_SUBSLICES]; | 127 | u8 subslice_mask[GEN_MAX_SLICES]; |
122 | u16 eu_total; | 128 | u16 eu_total; |
123 | u8 eu_per_subslice; | 129 | u8 eu_per_subslice; |
124 | u8 min_eu_in_pool; | 130 | u8 min_eu_in_pool; |
@@ -154,6 +160,7 @@ struct intel_device_info { | |||
154 | enum intel_platform platform; | 160 | enum intel_platform platform; |
155 | u32 platform_mask; | 161 | u32 platform_mask; |
156 | 162 | ||
163 | enum intel_ppgtt ppgtt; | ||
157 | unsigned int page_sizes; /* page sizes supported by the HW */ | 164 | unsigned int page_sizes; /* page sizes supported by the HW */ |
158 | 165 | ||
159 | u32 display_mmio_offset; | 166 | u32 display_mmio_offset; |
@@ -170,7 +177,6 @@ struct intel_device_info { | |||
170 | /* Register offsets for the various display pipes and transcoders */ | 177 | /* Register offsets for the various display pipes and transcoders */ |
171 | int pipe_offsets[I915_MAX_TRANSCODERS]; | 178 | int pipe_offsets[I915_MAX_TRANSCODERS]; |
172 | int trans_offsets[I915_MAX_TRANSCODERS]; | 179 | int trans_offsets[I915_MAX_TRANSCODERS]; |
173 | int palette_offsets[I915_MAX_PIPES]; | ||
174 | int cursor_offsets[I915_MAX_PIPES]; | 180 | int cursor_offsets[I915_MAX_PIPES]; |
175 | 181 | ||
176 | /* Slice/subslice/EU info */ | 182 | /* Slice/subslice/EU info */ |
@@ -178,6 +184,10 @@ struct intel_device_info { | |||
178 | 184 | ||
179 | u32 cs_timestamp_frequency_khz; | 185 | u32 cs_timestamp_frequency_khz; |
180 | 186 | ||
187 | /* Enabled (not fused off) media engine bitmasks. */ | ||
188 | u8 vdbox_enable; | ||
189 | u8 vebox_enable; | ||
190 | |||
181 | struct color_luts { | 191 | struct color_luts { |
182 | u16 degamma_lut_size; | 192 | u16 degamma_lut_size; |
183 | u16 gamma_lut_size; | 193 | u16 gamma_lut_size; |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9741cc419e1b..812ec5ae5c7b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -24,7 +24,6 @@ | |||
24 | * Eric Anholt <eric@anholt.net> | 24 | * Eric Anholt <eric@anholt.net> |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/dmi.h> | ||
28 | #include <linux/module.h> | 27 | #include <linux/module.h> |
29 | #include <linux/input.h> | 28 | #include <linux/input.h> |
30 | #include <linux/i2c.h> | 29 | #include <linux/i2c.h> |
@@ -74,55 +73,6 @@ static const uint64_t i9xx_format_modifiers[] = { | |||
74 | DRM_FORMAT_MOD_INVALID | 73 | DRM_FORMAT_MOD_INVALID |
75 | }; | 74 | }; |
76 | 75 | ||
77 | static const uint32_t skl_primary_formats[] = { | ||
78 | DRM_FORMAT_C8, | ||
79 | DRM_FORMAT_RGB565, | ||
80 | DRM_FORMAT_XRGB8888, | ||
81 | DRM_FORMAT_XBGR8888, | ||
82 | DRM_FORMAT_ARGB8888, | ||
83 | DRM_FORMAT_ABGR8888, | ||
84 | DRM_FORMAT_XRGB2101010, | ||
85 | DRM_FORMAT_XBGR2101010, | ||
86 | DRM_FORMAT_YUYV, | ||
87 | DRM_FORMAT_YVYU, | ||
88 | DRM_FORMAT_UYVY, | ||
89 | DRM_FORMAT_VYUY, | ||
90 | }; | ||
91 | |||
92 | static const uint32_t skl_pri_planar_formats[] = { | ||
93 | DRM_FORMAT_C8, | ||
94 | DRM_FORMAT_RGB565, | ||
95 | DRM_FORMAT_XRGB8888, | ||
96 | DRM_FORMAT_XBGR8888, | ||
97 | DRM_FORMAT_ARGB8888, | ||
98 | DRM_FORMAT_ABGR8888, | ||
99 | DRM_FORMAT_XRGB2101010, | ||
100 | DRM_FORMAT_XBGR2101010, | ||
101 | DRM_FORMAT_YUYV, | ||
102 | DRM_FORMAT_YVYU, | ||
103 | DRM_FORMAT_UYVY, | ||
104 | DRM_FORMAT_VYUY, | ||
105 | DRM_FORMAT_NV12, | ||
106 | }; | ||
107 | |||
108 | static const uint64_t skl_format_modifiers_noccs[] = { | ||
109 | I915_FORMAT_MOD_Yf_TILED, | ||
110 | I915_FORMAT_MOD_Y_TILED, | ||
111 | I915_FORMAT_MOD_X_TILED, | ||
112 | DRM_FORMAT_MOD_LINEAR, | ||
113 | DRM_FORMAT_MOD_INVALID | ||
114 | }; | ||
115 | |||
116 | static const uint64_t skl_format_modifiers_ccs[] = { | ||
117 | I915_FORMAT_MOD_Yf_TILED_CCS, | ||
118 | I915_FORMAT_MOD_Y_TILED_CCS, | ||
119 | I915_FORMAT_MOD_Yf_TILED, | ||
120 | I915_FORMAT_MOD_Y_TILED, | ||
121 | I915_FORMAT_MOD_X_TILED, | ||
122 | DRM_FORMAT_MOD_LINEAR, | ||
123 | DRM_FORMAT_MOD_INVALID | ||
124 | }; | ||
125 | |||
126 | /* Cursor formats */ | 76 | /* Cursor formats */ |
127 | static const uint32_t intel_cursor_formats[] = { | 77 | static const uint32_t intel_cursor_formats[] = { |
128 | DRM_FORMAT_ARGB8888, | 78 | DRM_FORMAT_ARGB8888, |
@@ -141,15 +91,15 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc, | |||
141 | static int intel_framebuffer_init(struct intel_framebuffer *ifb, | 91 | static int intel_framebuffer_init(struct intel_framebuffer *ifb, |
142 | struct drm_i915_gem_object *obj, | 92 | struct drm_i915_gem_object *obj, |
143 | struct drm_mode_fb_cmd2 *mode_cmd); | 93 | struct drm_mode_fb_cmd2 *mode_cmd); |
144 | static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc); | 94 | static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state); |
145 | static void intel_set_pipe_timings(struct intel_crtc *intel_crtc); | 95 | static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); |
146 | static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc); | 96 | static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, |
147 | static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, | 97 | const struct intel_link_m_n *m_n, |
148 | struct intel_link_m_n *m_n, | 98 | const struct intel_link_m_n *m2_n2); |
149 | struct intel_link_m_n *m2_n2); | 99 | static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state); |
150 | static void ironlake_set_pipeconf(struct drm_crtc *crtc); | 100 | static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state); |
151 | static void haswell_set_pipeconf(struct drm_crtc *crtc); | 101 | static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state); |
152 | static void haswell_set_pipemisc(struct drm_crtc *crtc); | 102 | static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state); |
153 | static void vlv_prepare_pll(struct intel_crtc *crtc, | 103 | static void vlv_prepare_pll(struct intel_crtc *crtc, |
154 | const struct intel_crtc_state *pipe_config); | 104 | const struct intel_crtc_state *pipe_config); |
155 | static void chv_prepare_pll(struct intel_crtc *crtc, | 105 | static void chv_prepare_pll(struct intel_crtc *crtc, |
@@ -158,9 +108,9 @@ static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); | |||
158 | static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); | 108 | static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); |
159 | static void intel_crtc_init_scalers(struct intel_crtc *crtc, | 109 | static void intel_crtc_init_scalers(struct intel_crtc *crtc, |
160 | struct intel_crtc_state *crtc_state); | 110 | struct intel_crtc_state *crtc_state); |
161 | static void skylake_pfit_enable(struct intel_crtc *crtc); | 111 | static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state); |
162 | static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); | 112 | static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state); |
163 | static void ironlake_pfit_enable(struct intel_crtc *crtc); | 113 | static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state); |
164 | static void intel_modeset_setup_hw_state(struct drm_device *dev, | 114 | static void intel_modeset_setup_hw_state(struct drm_device *dev, |
165 | struct drm_modeset_acquire_ctx *ctx); | 115 | struct drm_modeset_acquire_ctx *ctx); |
166 | static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); | 116 | static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); |
@@ -506,23 +456,8 @@ static const struct intel_limit intel_limits_bxt = { | |||
506 | }; | 456 | }; |
507 | 457 | ||
508 | static void | 458 | static void |
509 | skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable) | ||
510 | { | ||
511 | if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) | ||
512 | return; | ||
513 | |||
514 | if (enable) | ||
515 | I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS); | ||
516 | else | ||
517 | I915_WRITE(CHICKEN_PIPESL_1(pipe), 0); | ||
518 | } | ||
519 | |||
520 | static void | ||
521 | skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable) | 459 | skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable) |
522 | { | 460 | { |
523 | if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) | ||
524 | return; | ||
525 | |||
526 | if (enable) | 461 | if (enable) |
527 | I915_WRITE(CLKGATE_DIS_PSL(pipe), | 462 | I915_WRITE(CLKGATE_DIS_PSL(pipe), |
528 | DUPS1_GATING_DIS | DUPS2_GATING_DIS); | 463 | DUPS1_GATING_DIS | DUPS2_GATING_DIS); |
@@ -1381,6 +1316,7 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, | |||
1381 | "PCH LVDS enabled on transcoder %c, should be disabled\n", | 1316 | "PCH LVDS enabled on transcoder %c, should be disabled\n", |
1382 | pipe_name(pipe)); | 1317 | pipe_name(pipe)); |
1383 | 1318 | ||
1319 | /* PCH SDVOB multiplex with HDMIB */ | ||
1384 | assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB); | 1320 | assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB); |
1385 | assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC); | 1321 | assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC); |
1386 | assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID); | 1322 | assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID); |
@@ -1565,14 +1501,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc, | |||
1565 | } | 1501 | } |
1566 | } | 1502 | } |
1567 | 1503 | ||
1568 | static void i9xx_disable_pll(struct intel_crtc *crtc) | 1504 | static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state) |
1569 | { | 1505 | { |
1506 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); | ||
1570 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | 1507 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
1571 | enum pipe pipe = crtc->pipe; | 1508 | enum pipe pipe = crtc->pipe; |
1572 | 1509 | ||
1573 | /* Disable DVO 2x clock on both PLLs if necessary */ | 1510 | /* Disable DVO 2x clock on both PLLs if necessary */ |
1574 | if (IS_I830(dev_priv) && | 1511 | if (IS_I830(dev_priv) && |
1575 | intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) && | 1512 | intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) && |
1576 | !intel_num_dvo_pipes(dev_priv)) { | 1513 | !intel_num_dvo_pipes(dev_priv)) { |
1577 | I915_WRITE(DPLL(PIPE_B), | 1514 | I915_WRITE(DPLL(PIPE_B), |
1578 | I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); | 1515 | I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); |
@@ -1666,16 +1603,16 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, | |||
1666 | I915_READ(dpll_reg) & port_mask, expected_mask); | 1603 | I915_READ(dpll_reg) & port_mask, expected_mask); |
1667 | } | 1604 | } |
1668 | 1605 | ||
1669 | static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, | 1606 | static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) |
1670 | enum pipe pipe) | ||
1671 | { | 1607 | { |
1672 | struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, | 1608 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); |
1673 | pipe); | 1609 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
1610 | enum pipe pipe = crtc->pipe; | ||
1674 | i915_reg_t reg; | 1611 | i915_reg_t reg; |
1675 | uint32_t val, pipeconf_val; | 1612 | uint32_t val, pipeconf_val; |
1676 | 1613 | ||
1677 | /* Make sure PCH DPLL is enabled */ | 1614 | /* Make sure PCH DPLL is enabled */ |
1678 | assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll); | 1615 | assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll); |
1679 | 1616 | ||
1680 | /* FDI must be feeding us bits for PCH ports */ | 1617 | /* FDI must be feeding us bits for PCH ports */ |
1681 | assert_fdi_tx_enabled(dev_priv, pipe); | 1618 | assert_fdi_tx_enabled(dev_priv, pipe); |
@@ -1701,7 +1638,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, | |||
1701 | * here for both 8bpc and 12bpc. | 1638 | * here for both 8bpc and 12bpc. |
1702 | */ | 1639 | */ |
1703 | val &= ~PIPECONF_BPC_MASK; | 1640 | val &= ~PIPECONF_BPC_MASK; |
1704 | if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI)) | 1641 | if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) |
1705 | val |= PIPECONF_8BPC; | 1642 | val |= PIPECONF_8BPC; |
1706 | else | 1643 | else |
1707 | val |= pipeconf_val & PIPECONF_BPC_MASK; | 1644 | val |= pipeconf_val & PIPECONF_BPC_MASK; |
@@ -1710,7 +1647,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, | |||
1710 | val &= ~TRANS_INTERLACE_MASK; | 1647 | val &= ~TRANS_INTERLACE_MASK; |
1711 | if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) | 1648 | if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) |
1712 | if (HAS_PCH_IBX(dev_priv) && | 1649 | if (HAS_PCH_IBX(dev_priv) && |
1713 | intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) | 1650 | intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) |
1714 | val |= TRANS_LEGACY_INTERLACED_ILK; | 1651 | val |= TRANS_LEGACY_INTERLACED_ILK; |
1715 | else | 1652 | else |
1716 | val |= TRANS_INTERLACED; | 1653 | val |= TRANS_INTERLACED; |
@@ -2254,6 +2191,11 @@ static u32 intel_adjust_tile_offset(int *x, int *y, | |||
2254 | return new_offset; | 2191 | return new_offset; |
2255 | } | 2192 | } |
2256 | 2193 | ||
2194 | static bool is_surface_linear(u64 modifier, int color_plane) | ||
2195 | { | ||
2196 | return modifier == DRM_FORMAT_MOD_LINEAR; | ||
2197 | } | ||
2198 | |||
2257 | static u32 intel_adjust_aligned_offset(int *x, int *y, | 2199 | static u32 intel_adjust_aligned_offset(int *x, int *y, |
2258 | const struct drm_framebuffer *fb, | 2200 | const struct drm_framebuffer *fb, |
2259 | int color_plane, | 2201 | int color_plane, |
@@ -2266,7 +2208,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y, | |||
2266 | 2208 | ||
2267 | WARN_ON(new_offset > old_offset); | 2209 | WARN_ON(new_offset > old_offset); |
2268 | 2210 | ||
2269 | if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { | 2211 | if (!is_surface_linear(fb->modifier, color_plane)) { |
2270 | unsigned int tile_size, tile_width, tile_height; | 2212 | unsigned int tile_size, tile_width, tile_height; |
2271 | unsigned int pitch_tiles; | 2213 | unsigned int pitch_tiles; |
2272 | 2214 | ||
@@ -2330,14 +2272,13 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv, | |||
2330 | unsigned int rotation, | 2272 | unsigned int rotation, |
2331 | u32 alignment) | 2273 | u32 alignment) |
2332 | { | 2274 | { |
2333 | uint64_t fb_modifier = fb->modifier; | ||
2334 | unsigned int cpp = fb->format->cpp[color_plane]; | 2275 | unsigned int cpp = fb->format->cpp[color_plane]; |
2335 | u32 offset, offset_aligned; | 2276 | u32 offset, offset_aligned; |
2336 | 2277 | ||
2337 | if (alignment) | 2278 | if (alignment) |
2338 | alignment--; | 2279 | alignment--; |
2339 | 2280 | ||
2340 | if (fb_modifier != DRM_FORMAT_MOD_LINEAR) { | 2281 | if (!is_surface_linear(fb->modifier, color_plane)) { |
2341 | unsigned int tile_size, tile_width, tile_height; | 2282 | unsigned int tile_size, tile_width, tile_height; |
2342 | unsigned int tile_rows, tiles, pitch_tiles; | 2283 | unsigned int tile_rows, tiles, pitch_tiles; |
2343 | 2284 | ||
@@ -2574,7 +2515,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, | |||
2574 | tile_size); | 2515 | tile_size); |
2575 | offset /= tile_size; | 2516 | offset /= tile_size; |
2576 | 2517 | ||
2577 | if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { | 2518 | if (!is_surface_linear(fb->modifier, i)) { |
2578 | unsigned int tile_width, tile_height; | 2519 | unsigned int tile_width, tile_height; |
2579 | unsigned int pitch_tiles; | 2520 | unsigned int pitch_tiles; |
2580 | struct drm_rect r; | 2521 | struct drm_rect r; |
@@ -2788,10 +2729,6 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state, | |||
2788 | crtc_state->base.plane_mask |= drm_plane_mask(&plane->base); | 2729 | crtc_state->base.plane_mask |= drm_plane_mask(&plane->base); |
2789 | else | 2730 | else |
2790 | crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base); | 2731 | crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base); |
2791 | |||
2792 | DRM_DEBUG_KMS("%s active planes 0x%x\n", | ||
2793 | crtc_state->base.crtc->name, | ||
2794 | crtc_state->active_planes); | ||
2795 | } | 2732 | } |
2796 | 2733 | ||
2797 | static void fixup_active_planes(struct intel_crtc_state *crtc_state) | 2734 | static void fixup_active_planes(struct intel_crtc_state *crtc_state) |
@@ -2819,6 +2756,10 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, | |||
2819 | struct intel_plane_state *plane_state = | 2756 | struct intel_plane_state *plane_state = |
2820 | to_intel_plane_state(plane->base.state); | 2757 | to_intel_plane_state(plane->base.state); |
2821 | 2758 | ||
2759 | DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n", | ||
2760 | plane->base.base.id, plane->base.name, | ||
2761 | crtc->base.base.id, crtc->base.name); | ||
2762 | |||
2822 | intel_set_plane_visible(crtc_state, plane_state, false); | 2763 | intel_set_plane_visible(crtc_state, plane_state, false); |
2823 | fixup_active_planes(crtc_state); | 2764 | fixup_active_planes(crtc_state); |
2824 | 2765 | ||
@@ -2890,6 +2831,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, | |||
2890 | return; | 2831 | return; |
2891 | 2832 | ||
2892 | valid_fb: | 2833 | valid_fb: |
2834 | intel_state->base.rotation = plane_config->rotation; | ||
2893 | intel_fill_fb_ggtt_view(&intel_state->view, fb, | 2835 | intel_fill_fb_ggtt_view(&intel_state->view, fb, |
2894 | intel_state->base.rotation); | 2836 | intel_state->base.rotation); |
2895 | intel_state->color_plane[0].stride = | 2837 | intel_state->color_plane[0].stride = |
@@ -3098,28 +3040,6 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) | |||
3098 | return 0; | 3040 | return 0; |
3099 | } | 3041 | } |
3100 | 3042 | ||
3101 | static int | ||
3102 | skl_check_nv12_surface(struct intel_plane_state *plane_state) | ||
3103 | { | ||
3104 | /* Display WA #1106 */ | ||
3105 | if (plane_state->base.rotation != | ||
3106 | (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) && | ||
3107 | plane_state->base.rotation != DRM_MODE_ROTATE_270) | ||
3108 | return 0; | ||
3109 | |||
3110 | /* | ||
3111 | * src coordinates are rotated here. | ||
3112 | * We check height but report it as width | ||
3113 | */ | ||
3114 | if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) { | ||
3115 | DRM_DEBUG_KMS("src width must be multiple " | ||
3116 | "of 4 for rotated NV12\n"); | ||
3117 | return -EINVAL; | ||
3118 | } | ||
3119 | |||
3120 | return 0; | ||
3121 | } | ||
3122 | |||
3123 | static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) | 3043 | static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) |
3124 | { | 3044 | { |
3125 | const struct drm_framebuffer *fb = plane_state->base.fb; | 3045 | const struct drm_framebuffer *fb = plane_state->base.fb; |
@@ -3198,9 +3118,6 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) | |||
3198 | * the main surface setup depends on it. | 3118 | * the main surface setup depends on it. |
3199 | */ | 3119 | */ |
3200 | if (fb->format->format == DRM_FORMAT_NV12) { | 3120 | if (fb->format->format == DRM_FORMAT_NV12) { |
3201 | ret = skl_check_nv12_surface(plane_state); | ||
3202 | if (ret) | ||
3203 | return ret; | ||
3204 | ret = skl_check_nv12_aux_surface(plane_state); | 3121 | ret = skl_check_nv12_aux_surface(plane_state); |
3205 | if (ret) | 3122 | if (ret) |
3206 | return ret; | 3123 | return ret; |
@@ -3448,7 +3365,6 @@ static void i9xx_update_plane(struct intel_plane *plane, | |||
3448 | intel_plane_ggtt_offset(plane_state) + | 3365 | intel_plane_ggtt_offset(plane_state) + |
3449 | dspaddr_offset); | 3366 | dspaddr_offset); |
3450 | } | 3367 | } |
3451 | POSTING_READ_FW(reg); | ||
3452 | 3368 | ||
3453 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 3369 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
3454 | } | 3370 | } |
@@ -3467,7 +3383,6 @@ static void i9xx_disable_plane(struct intel_plane *plane, | |||
3467 | I915_WRITE_FW(DSPSURF(i9xx_plane), 0); | 3383 | I915_WRITE_FW(DSPSURF(i9xx_plane), 0); |
3468 | else | 3384 | else |
3469 | I915_WRITE_FW(DSPADDR(i9xx_plane), 0); | 3385 | I915_WRITE_FW(DSPADDR(i9xx_plane), 0); |
3470 | POSTING_READ_FW(DSPCNTR(i9xx_plane)); | ||
3471 | 3386 | ||
3472 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 3387 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
3473 | } | 3388 | } |
@@ -3527,13 +3442,13 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) | |||
3527 | /* | 3442 | /* |
3528 | * This function detaches (aka. unbinds) unused scalers in hardware | 3443 | * This function detaches (aka. unbinds) unused scalers in hardware |
3529 | */ | 3444 | */ |
3530 | static void skl_detach_scalers(struct intel_crtc *intel_crtc) | 3445 | static void skl_detach_scalers(const struct intel_crtc_state *crtc_state) |
3531 | { | 3446 | { |
3532 | struct intel_crtc_scaler_state *scaler_state; | 3447 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); |
3448 | const struct intel_crtc_scaler_state *scaler_state = | ||
3449 | &crtc_state->scaler_state; | ||
3533 | int i; | 3450 | int i; |
3534 | 3451 | ||
3535 | scaler_state = &intel_crtc->config->scaler_state; | ||
3536 | |||
3537 | /* loop through and disable scalers that aren't in use */ | 3452 | /* loop through and disable scalers that aren't in use */ |
3538 | for (i = 0; i < intel_crtc->num_scalers; i++) { | 3453 | for (i = 0; i < intel_crtc->num_scalers; i++) { |
3539 | if (!scaler_state->scalers[i].in_use) | 3454 | if (!scaler_state->scalers[i].in_use) |
@@ -3597,29 +3512,38 @@ static u32 skl_plane_ctl_format(uint32_t pixel_format) | |||
3597 | return 0; | 3512 | return 0; |
3598 | } | 3513 | } |
3599 | 3514 | ||
3600 | /* | 3515 | static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state) |
3601 | * XXX: For ARGB/ABGR formats we default to expecting scanout buffers | ||
3602 | * to be already pre-multiplied. We need to add a knob (or a different | ||
3603 | * DRM_FORMAT) for user-space to configure that. | ||
3604 | */ | ||
3605 | static u32 skl_plane_ctl_alpha(uint32_t pixel_format) | ||
3606 | { | 3516 | { |
3607 | switch (pixel_format) { | 3517 | if (!plane_state->base.fb->format->has_alpha) |
3608 | case DRM_FORMAT_ABGR8888: | 3518 | return PLANE_CTL_ALPHA_DISABLE; |
3609 | case DRM_FORMAT_ARGB8888: | 3519 | |
3520 | switch (plane_state->base.pixel_blend_mode) { | ||
3521 | case DRM_MODE_BLEND_PIXEL_NONE: | ||
3522 | return PLANE_CTL_ALPHA_DISABLE; | ||
3523 | case DRM_MODE_BLEND_PREMULTI: | ||
3610 | return PLANE_CTL_ALPHA_SW_PREMULTIPLY; | 3524 | return PLANE_CTL_ALPHA_SW_PREMULTIPLY; |
3525 | case DRM_MODE_BLEND_COVERAGE: | ||
3526 | return PLANE_CTL_ALPHA_HW_PREMULTIPLY; | ||
3611 | default: | 3527 | default: |
3528 | MISSING_CASE(plane_state->base.pixel_blend_mode); | ||
3612 | return PLANE_CTL_ALPHA_DISABLE; | 3529 | return PLANE_CTL_ALPHA_DISABLE; |
3613 | } | 3530 | } |
3614 | } | 3531 | } |
3615 | 3532 | ||
3616 | static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format) | 3533 | static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state) |
3617 | { | 3534 | { |
3618 | switch (pixel_format) { | 3535 | if (!plane_state->base.fb->format->has_alpha) |
3619 | case DRM_FORMAT_ABGR8888: | 3536 | return PLANE_COLOR_ALPHA_DISABLE; |
3620 | case DRM_FORMAT_ARGB8888: | 3537 | |
3538 | switch (plane_state->base.pixel_blend_mode) { | ||
3539 | case DRM_MODE_BLEND_PIXEL_NONE: | ||
3540 | return PLANE_COLOR_ALPHA_DISABLE; | ||
3541 | case DRM_MODE_BLEND_PREMULTI: | ||
3621 | return PLANE_COLOR_ALPHA_SW_PREMULTIPLY; | 3542 | return PLANE_COLOR_ALPHA_SW_PREMULTIPLY; |
3543 | case DRM_MODE_BLEND_COVERAGE: | ||
3544 | return PLANE_COLOR_ALPHA_HW_PREMULTIPLY; | ||
3622 | default: | 3545 | default: |
3546 | MISSING_CASE(plane_state->base.pixel_blend_mode); | ||
3623 | return PLANE_COLOR_ALPHA_DISABLE; | 3547 | return PLANE_COLOR_ALPHA_DISABLE; |
3624 | } | 3548 | } |
3625 | } | 3549 | } |
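
The two helpers above implement the same three-way policy: a framebuffer format without alpha disables blending outright, otherwise the plane's pixel_blend_mode property selects between software and hardware premultiplication. A minimal standalone C sketch of that policy (the enum names below are illustrative placeholders, not the real drm_blend.h / i915_reg.h definitions):

	#include <stdio.h>

	/* Illustrative stand-ins for the DRM blend modes and the PLANE_CTL
	 * alpha bits; the real definitions live in drm_blend.h and
	 * i915_reg.h respectively. */
	enum blend_mode { BLEND_PIXEL_NONE, BLEND_PREMULTI, BLEND_COVERAGE };
	enum plane_alpha { ALPHA_DISABLE, ALPHA_SW_PREMULTIPLY, ALPHA_HW_PREMULTIPLY };

	static enum plane_alpha pick_alpha(int fb_has_alpha, enum blend_mode mode)
	{
		if (!fb_has_alpha)		/* e.g. XRGB8888: no alpha to blend */
			return ALPHA_DISABLE;

		switch (mode) {
		case BLEND_PREMULTI:		/* buffer arrives premultiplied */
			return ALPHA_SW_PREMULTIPLY;
		case BLEND_COVERAGE:		/* hardware premultiplies at scanout */
			return ALPHA_HW_PREMULTIPLY;
		case BLEND_PIXEL_NONE:
		default:
			return ALPHA_DISABLE;
		}
	}

	int main(void)
	{
		/* ARGB8888 with coverage blending -> hardware premultiply. */
		printf("%d\n", pick_alpha(1, BLEND_COVERAGE));
		return 0;
	}
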
@@ -3696,7 +3620,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, | |||
3696 | plane_ctl = PLANE_CTL_ENABLE; | 3620 | plane_ctl = PLANE_CTL_ENABLE; |
3697 | 3621 | ||
3698 | if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) { | 3622 | if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) { |
3699 | plane_ctl |= skl_plane_ctl_alpha(fb->format->format); | 3623 | plane_ctl |= skl_plane_ctl_alpha(plane_state); |
3700 | plane_ctl |= | 3624 | plane_ctl |= |
3701 | PLANE_CTL_PIPE_GAMMA_ENABLE | | 3625 | PLANE_CTL_PIPE_GAMMA_ENABLE | |
3702 | PLANE_CTL_PIPE_CSC_ENABLE | | 3626 | PLANE_CTL_PIPE_CSC_ENABLE | |
@@ -3731,6 +3655,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, | |||
3731 | struct drm_i915_private *dev_priv = | 3655 | struct drm_i915_private *dev_priv = |
3732 | to_i915(plane_state->base.plane->dev); | 3656 | to_i915(plane_state->base.plane->dev); |
3733 | const struct drm_framebuffer *fb = plane_state->base.fb; | 3657 | const struct drm_framebuffer *fb = plane_state->base.fb; |
3658 | struct intel_plane *plane = to_intel_plane(plane_state->base.plane); | ||
3734 | u32 plane_color_ctl = 0; | 3659 | u32 plane_color_ctl = 0; |
3735 | 3660 | ||
3736 | if (INTEL_GEN(dev_priv) < 11) { | 3661 | if (INTEL_GEN(dev_priv) < 11) { |
@@ -3738,9 +3663,9 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, | |||
3738 | plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; | 3663 | plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; |
3739 | } | 3664 | } |
3740 | plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; | 3665 | plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE; |
3741 | plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format); | 3666 | plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state); |
3742 | 3667 | ||
3743 | if (fb->format->is_yuv) { | 3668 | if (fb->format->is_yuv && !icl_is_hdr_plane(plane)) { |
3744 | if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) | 3669 | if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) |
3745 | plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; | 3670 | plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709; |
3746 | else | 3671 | else |
@@ -3748,6 +3673,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, | |||
3748 | 3673 | ||
3749 | if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) | 3674 | if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) |
3750 | plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; | 3675 | plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; |
3676 | } else if (fb->format->is_yuv) { | ||
3677 | plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE; | ||
3751 | } | 3678 | } |
3752 | 3679 | ||
3753 | return plane_color_ctl; | 3680 | return plane_color_ctl; |
@@ -3932,15 +3859,15 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta | |||
3932 | 3859 | ||
3933 | /* on skylake this is done by detaching scalers */ | 3860 | /* on skylake this is done by detaching scalers */ |
3934 | if (INTEL_GEN(dev_priv) >= 9) { | 3861 | if (INTEL_GEN(dev_priv) >= 9) { |
3935 | skl_detach_scalers(crtc); | 3862 | skl_detach_scalers(new_crtc_state); |
3936 | 3863 | ||
3937 | if (new_crtc_state->pch_pfit.enabled) | 3864 | if (new_crtc_state->pch_pfit.enabled) |
3938 | skylake_pfit_enable(crtc); | 3865 | skylake_pfit_enable(new_crtc_state); |
3939 | } else if (HAS_PCH_SPLIT(dev_priv)) { | 3866 | } else if (HAS_PCH_SPLIT(dev_priv)) { |
3940 | if (new_crtc_state->pch_pfit.enabled) | 3867 | if (new_crtc_state->pch_pfit.enabled) |
3941 | ironlake_pfit_enable(crtc); | 3868 | ironlake_pfit_enable(new_crtc_state); |
3942 | else if (old_crtc_state->pch_pfit.enabled) | 3869 | else if (old_crtc_state->pch_pfit.enabled) |
3943 | ironlake_pfit_disable(crtc, true); | 3870 | ironlake_pfit_disable(old_crtc_state); |
3944 | } | 3871 | } |
3945 | } | 3872 | } |
3946 | 3873 | ||
@@ -4339,10 +4266,10 @@ train_done: | |||
4339 | DRM_DEBUG_KMS("FDI train done.\n"); | 4266 | DRM_DEBUG_KMS("FDI train done.\n"); |
4340 | } | 4267 | } |
4341 | 4268 | ||
4342 | static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) | 4269 | static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state) |
4343 | { | 4270 | { |
4344 | struct drm_device *dev = intel_crtc->base.dev; | 4271 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); |
4345 | struct drm_i915_private *dev_priv = to_i915(dev); | 4272 | struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); |
4346 | int pipe = intel_crtc->pipe; | 4273 | int pipe = intel_crtc->pipe; |
4347 | i915_reg_t reg; | 4274 | i915_reg_t reg; |
4348 | u32 temp; | 4275 | u32 temp; |
@@ -4351,7 +4278,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) | |||
4351 | reg = FDI_RX_CTL(pipe); | 4278 | reg = FDI_RX_CTL(pipe); |
4352 | temp = I915_READ(reg); | 4279 | temp = I915_READ(reg); |
4353 | temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); | 4280 | temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); |
4354 | temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes); | 4281 | temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); |
4355 | temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; | 4282 | temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; |
4356 | I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); | 4283 | I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); |
4357 | 4284 | ||
@@ -4500,10 +4427,11 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv) | |||
4500 | } | 4427 | } |
4501 | 4428 | ||
4502 | /* Program iCLKIP clock to the desired frequency */ | 4429 | /* Program iCLKIP clock to the desired frequency */ |
4503 | static void lpt_program_iclkip(struct intel_crtc *crtc) | 4430 | static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) |
4504 | { | 4431 | { |
4432 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); | ||
4505 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | 4433 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
4506 | int clock = crtc->config->base.adjusted_mode.crtc_clock; | 4434 | int clock = crtc_state->base.adjusted_mode.crtc_clock; |
4507 | u32 divsel, phaseinc, auxdiv, phasedir = 0; | 4435 | u32 divsel, phaseinc, auxdiv, phasedir = 0; |
4508 | u32 temp; | 4436 | u32 temp; |
4509 | 4437 | ||
@@ -4614,12 +4542,12 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv) | |||
4614 | desired_divisor << auxdiv); | 4542 | desired_divisor << auxdiv); |
4615 | } | 4543 | } |
4616 | 4544 | ||
4617 | static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, | 4545 | static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, |
4618 | enum pipe pch_transcoder) | 4546 | enum pipe pch_transcoder) |
4619 | { | 4547 | { |
4620 | struct drm_device *dev = crtc->base.dev; | 4548 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); |
4621 | struct drm_i915_private *dev_priv = to_i915(dev); | 4549 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
4622 | enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; | 4550 | enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; |
4623 | 4551 | ||
4624 | I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), | 4552 | I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), |
4625 | I915_READ(HTOTAL(cpu_transcoder))); | 4553 | I915_READ(HTOTAL(cpu_transcoder))); |
@@ -4638,9 +4566,8 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, | |||
4638 | I915_READ(VSYNCSHIFT(cpu_transcoder))); | 4566 | I915_READ(VSYNCSHIFT(cpu_transcoder))); |
4639 | } | 4567 | } |
4640 | 4568 | ||
4641 | static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable) | 4569 | static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable) |
4642 | { | 4570 | { |
4643 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
4644 | uint32_t temp; | 4571 | uint32_t temp; |
4645 | 4572 | ||
4646 | temp = I915_READ(SOUTH_CHICKEN1); | 4573 | temp = I915_READ(SOUTH_CHICKEN1); |
@@ -4659,22 +4586,23 @@ static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable) | |||
4659 | POSTING_READ(SOUTH_CHICKEN1); | 4586 | POSTING_READ(SOUTH_CHICKEN1); |
4660 | } | 4587 | } |
4661 | 4588 | ||
4662 | static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) | 4589 | static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state) |
4663 | { | 4590 | { |
4664 | struct drm_device *dev = intel_crtc->base.dev; | 4591 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); |
4592 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | ||
4665 | 4593 | ||
4666 | switch (intel_crtc->pipe) { | 4594 | switch (crtc->pipe) { |
4667 | case PIPE_A: | 4595 | case PIPE_A: |
4668 | break; | 4596 | break; |
4669 | case PIPE_B: | 4597 | case PIPE_B: |
4670 | if (intel_crtc->config->fdi_lanes > 2) | 4598 | if (crtc_state->fdi_lanes > 2) |
4671 | cpt_set_fdi_bc_bifurcation(dev, false); | 4599 | cpt_set_fdi_bc_bifurcation(dev_priv, false); |
4672 | else | 4600 | else |
4673 | cpt_set_fdi_bc_bifurcation(dev, true); | 4601 | cpt_set_fdi_bc_bifurcation(dev_priv, true); |
4674 | 4602 | ||
4675 | break; | 4603 | break; |
4676 | case PIPE_C: | 4604 | case PIPE_C: |
4677 | cpt_set_fdi_bc_bifurcation(dev, true); | 4605 | cpt_set_fdi_bc_bifurcation(dev_priv, true); |
4678 | 4606 | ||
4679 | break; | 4607 | break; |
4680 | default: | 4608 | default: |
@@ -4731,7 +4659,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state, | |||
4731 | assert_pch_transcoder_disabled(dev_priv, pipe); | 4659 | assert_pch_transcoder_disabled(dev_priv, pipe); |
4732 | 4660 | ||
4733 | if (IS_IVYBRIDGE(dev_priv)) | 4661 | if (IS_IVYBRIDGE(dev_priv)) |
4734 | ivybridge_update_fdi_bc_bifurcation(crtc); | 4662 | ivybridge_update_fdi_bc_bifurcation(crtc_state); |
4735 | 4663 | ||
4736 | /* Write the TU size bits before fdi link training, so that error | 4664 | /* Write the TU size bits before fdi link training, so that error |
4737 | * detection works. */ | 4665 | * detection works. */ |
@@ -4764,11 +4692,11 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state, | |||
4764 | * Note that enable_shared_dpll tries to do the right thing, but | 4692 | * Note that enable_shared_dpll tries to do the right thing, but |
4765 | * get_shared_dpll unconditionally resets the pll - we need that to have | 4693 | * get_shared_dpll unconditionally resets the pll - we need that to have |
4766 | * the right LVDS enable sequence. */ | 4694 | * the right LVDS enable sequence. */ |
4767 | intel_enable_shared_dpll(crtc); | 4695 | intel_enable_shared_dpll(crtc_state); |
4768 | 4696 | ||
4769 | /* set transcoder timing, panel must allow it */ | 4697 | /* set transcoder timing, panel must allow it */ |
4770 | assert_panel_unlocked(dev_priv, pipe); | 4698 | assert_panel_unlocked(dev_priv, pipe); |
4771 | ironlake_pch_transcoder_set_timings(crtc, pipe); | 4699 | ironlake_pch_transcoder_set_timings(crtc_state, pipe); |
4772 | 4700 | ||
4773 | intel_fdi_normal_train(crtc); | 4701 | intel_fdi_normal_train(crtc); |
4774 | 4702 | ||
@@ -4800,7 +4728,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state, | |||
4800 | I915_WRITE(reg, temp); | 4728 | I915_WRITE(reg, temp); |
4801 | } | 4729 | } |
4802 | 4730 | ||
4803 | ironlake_enable_pch_transcoder(dev_priv, pipe); | 4731 | ironlake_enable_pch_transcoder(crtc_state); |
4804 | } | 4732 | } |
4805 | 4733 | ||
4806 | static void lpt_pch_enable(const struct intel_atomic_state *state, | 4734 | static void lpt_pch_enable(const struct intel_atomic_state *state, |
@@ -4812,10 +4740,10 @@ static void lpt_pch_enable(const struct intel_atomic_state *state, | |||
4812 | 4740 | ||
4813 | assert_pch_transcoder_disabled(dev_priv, PIPE_A); | 4741 | assert_pch_transcoder_disabled(dev_priv, PIPE_A); |
4814 | 4742 | ||
4815 | lpt_program_iclkip(crtc); | 4743 | lpt_program_iclkip(crtc_state); |
4816 | 4744 | ||
4817 | /* Set transcoder timing. */ | 4745 | /* Set transcoder timing. */ |
4818 | ironlake_pch_transcoder_set_timings(crtc, PIPE_A); | 4746 | ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A); |
4819 | 4747 | ||
4820 | lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); | 4748 | lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); |
4821 | } | 4749 | } |
@@ -4850,8 +4778,31 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe) | |||
4850 | * chroma samples for both of the luma samples, and thus we don't | 4778 | * chroma samples for both of the luma samples, and thus we don't |
4851 | * actually get the expected MPEG2 chroma siting convention :( | 4779 | * actually get the expected MPEG2 chroma siting convention :( |
4852 | * The same behaviour is observed on pre-SKL platforms as well. | 4780 | * The same behaviour is observed on pre-SKL platforms as well. |
4781 | * | ||
4782 | * Theory behind the formula (note that we ignore sub-pixel | ||
4783 | * source coordinates): | ||
4784 | * s = source sample position | ||
4785 | * d = destination sample position | ||
4786 | * | ||
4787 | * Downscaling 4:1: | ||
4788 | * -0.5 | ||
4789 | * | 0.0 | ||
4790 | * | | 1.5 (initial phase) | ||
4791 | * | | | | ||
4792 | * v v v | ||
4793 | * | s | s | s | s | | ||
4794 | * | d | | ||
4795 | * | ||
4796 | * Upscaling 1:4: | ||
4797 | * -0.5 | ||
4798 | * | -0.375 (initial phase) | ||
4799 | * | | 0.0 | ||
4800 | * | | | | ||
4801 | * v v v | ||
4802 | * | s | | ||
4803 | * | d | d | d | d | | ||
4853 | */ | 4804 | */ |
4854 | u16 skl_scaler_calc_phase(int sub, bool chroma_cosited) | 4805 | u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited) |
4855 | { | 4806 | { |
4856 | int phase = -0x8000; | 4807 | int phase = -0x8000; |
4857 | u16 trip = 0; | 4808 | u16 trip = 0; |
@@ -4859,6 +4810,15 @@ u16 skl_scaler_calc_phase(int sub, bool chroma_cosited) | |||
4859 | if (chroma_cosited) | 4810 | if (chroma_cosited) |
4860 | phase += (sub - 1) * 0x8000 / sub; | 4811 | phase += (sub - 1) * 0x8000 / sub; |
4861 | 4812 | ||
4813 | phase += scale / (2 * sub); | ||
4814 | |||
4815 | /* | ||
4816 | * Hardware initial phase limited to [-0.5:1.5]. | ||
4817 | * Since the max hardware scale factor is 3.0, we | ||
4818 | * should never actually exceed 1.0 here. | ||
4819 | */ | ||
4820 | WARN_ON(phase < -0x8000 || phase > 0x18000); | ||
4821 | |||
4862 | if (phase < 0) | 4822 | if (phase < 0) |
4863 | phase = 0x10000 + phase; | 4823 | phase = 0x10000 + phase; |
4864 | else | 4824 | else |
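
The diagrams above can be sanity-checked numerically. A standalone sketch of the initial-phase arithmetic just introduced (assumptions: 16.16 fixed point throughout; the final hardware packing of the result is omitted):

	#include <stdio.h>

	/* Mirrors the phase formula in the hunk above: start at -0.5,
	 * optionally shift for cosited chroma, then add half the scale
	 * factor to land on the center of the first tap. */
	static int initial_phase(int sub, int scale, int chroma_cosited)
	{
		int phase = -0x8000;			/* -0.5 in 16.16 */

		if (chroma_cosited)
			phase += (sub - 1) * 0x8000 / sub;

		phase += scale / (2 * sub);
		return phase;
	}

	int main(void)
	{
		/* Identity (scale 1.0): phase 0.0, samples coincide. */
		printf("1:1 -> %d\n", initial_phase(1, 0x10000, 0));	/* 0 */
		/* Upscaling 1:4 (scale 0.25): -0x6000 = -0.375, per the diagram. */
		printf("1:4 -> %d\n", initial_phase(1, 0x4000, 0));	/* -24576 */
		/* Downscaling 4:1 (scale 4.0): 0x18000 = 1.5, per the diagram. */
		printf("4:1 -> %d\n", initial_phase(1, 0x40000, 0));	/* 98304 */
		return 0;
	}

All three results land inside the [-0.5:1.5] window that the WARN_ON above enforces.
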
@@ -4871,8 +4831,7 @@ static int | |||
4871 | skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, | 4831 | skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, |
4872 | unsigned int scaler_user, int *scaler_id, | 4832 | unsigned int scaler_user, int *scaler_id, |
4873 | int src_w, int src_h, int dst_w, int dst_h, | 4833 | int src_w, int src_h, int dst_w, int dst_h, |
4874 | bool plane_scaler_check, | 4834 | const struct drm_format_info *format, bool need_scaler) |
4875 | uint32_t pixel_format) | ||
4876 | { | 4835 | { |
4877 | struct intel_crtc_scaler_state *scaler_state = | 4836 | struct intel_crtc_scaler_state *scaler_state = |
4878 | &crtc_state->scaler_state; | 4837 | &crtc_state->scaler_state; |
@@ -4881,21 +4840,14 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, | |||
4881 | struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); | 4840 | struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); |
4882 | const struct drm_display_mode *adjusted_mode = | 4841 | const struct drm_display_mode *adjusted_mode = |
4883 | &crtc_state->base.adjusted_mode; | 4842 | &crtc_state->base.adjusted_mode; |
4884 | int need_scaling; | ||
4885 | 4843 | ||
4886 | /* | 4844 | /* |
4887 | * Src coordinates are already rotated by 270 degrees for | 4845 | * Src coordinates are already rotated by 270 degrees for |
4888 | * the 90/270 degree plane rotation cases (to match the | 4846 | * the 90/270 degree plane rotation cases (to match the |
4889 | * GTT mapping), hence no need to account for rotation here. | 4847 | * GTT mapping), hence no need to account for rotation here. |
4890 | */ | 4848 | */ |
4891 | need_scaling = src_w != dst_w || src_h != dst_h; | 4849 | if (src_w != dst_w || src_h != dst_h) |
4892 | 4850 | need_scaler = true; | |
4893 | if (plane_scaler_check) | ||
4894 | if (pixel_format == DRM_FORMAT_NV12) | ||
4895 | need_scaling = true; | ||
4896 | |||
4897 | if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX) | ||
4898 | need_scaling = true; | ||
4899 | 4851 | ||
4900 | /* | 4852 | /* |
4901 | * Scaling/fitting not supported in IF-ID mode in GEN9+ | 4853 | * Scaling/fitting not supported in IF-ID mode in GEN9+ |
@@ -4904,7 +4856,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, | |||
4904 | * for NV12. | 4856 | * for NV12. |
4905 | */ | 4857 | */ |
4906 | if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable && | 4858 | if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable && |
4907 | need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { | 4859 | need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { |
4908 | DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n"); | 4860 | DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n"); |
4909 | return -EINVAL; | 4861 | return -EINVAL; |
4910 | } | 4862 | } |
@@ -4919,7 +4871,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, | |||
4919 | * update to free the scaler is done in plane/panel-fit programming. | 4871 | * update to free the scaler is done in plane/panel-fit programming. |
4920 | * For this purpose crtc/plane_state->scaler_id isn't reset here. | 4872 | * For this purpose crtc/plane_state->scaler_id isn't reset here. |
4921 | */ | 4873 | */ |
4922 | if (force_detach || !need_scaling) { | 4874 | if (force_detach || !need_scaler) { |
4923 | if (*scaler_id >= 0) { | 4875 | if (*scaler_id >= 0) { |
4924 | scaler_state->scaler_users &= ~(1 << scaler_user); | 4876 | scaler_state->scaler_users &= ~(1 << scaler_user); |
4925 | scaler_state->scalers[*scaler_id].in_use = 0; | 4877 | scaler_state->scalers[*scaler_id].in_use = 0; |
@@ -4933,7 +4885,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, | |||
4933 | return 0; | 4885 | return 0; |
4934 | } | 4886 | } |
4935 | 4887 | ||
4936 | if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 && | 4888 | if (format && format->format == DRM_FORMAT_NV12 && |
4937 | (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { | 4889 | (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { |
4938 | DRM_DEBUG_KMS("NV12: src dimensions not met\n"); | 4890 | DRM_DEBUG_KMS("NV12: src dimensions not met\n"); |
4939 | return -EINVAL; | 4891 | return -EINVAL; |
@@ -4976,12 +4928,16 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, | |||
4976 | int skl_update_scaler_crtc(struct intel_crtc_state *state) | 4928 | int skl_update_scaler_crtc(struct intel_crtc_state *state) |
4977 | { | 4929 | { |
4978 | const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; | 4930 | const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; |
4931 | bool need_scaler = false; | ||
4932 | |||
4933 | if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) | ||
4934 | need_scaler = true; | ||
4979 | 4935 | ||
4980 | return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, | 4936 | return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, |
4981 | &state->scaler_state.scaler_id, | 4937 | &state->scaler_state.scaler_id, |
4982 | state->pipe_src_w, state->pipe_src_h, | 4938 | state->pipe_src_w, state->pipe_src_h, |
4983 | adjusted_mode->crtc_hdisplay, | 4939 | adjusted_mode->crtc_hdisplay, |
4984 | adjusted_mode->crtc_vdisplay, false, 0); | 4940 | adjusted_mode->crtc_vdisplay, NULL, need_scaler); |
4985 | } | 4941 | } |
4986 | 4942 | ||
4987 | /** | 4943 | /** |
@@ -4996,13 +4952,17 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state) | |||
4996 | static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, | 4952 | static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, |
4997 | struct intel_plane_state *plane_state) | 4953 | struct intel_plane_state *plane_state) |
4998 | { | 4954 | { |
4999 | |||
5000 | struct intel_plane *intel_plane = | 4955 | struct intel_plane *intel_plane = |
5001 | to_intel_plane(plane_state->base.plane); | 4956 | to_intel_plane(plane_state->base.plane); |
5002 | struct drm_framebuffer *fb = plane_state->base.fb; | 4957 | struct drm_framebuffer *fb = plane_state->base.fb; |
5003 | int ret; | 4958 | int ret; |
5004 | |||
5005 | bool force_detach = !fb || !plane_state->base.visible; | 4959 | bool force_detach = !fb || !plane_state->base.visible; |
4960 | bool need_scaler = false; | ||
4961 | |||
4962 | /* Pre-gen11 and SDR planes always need a scaler for planar formats. */ | ||
4963 | if (!icl_is_hdr_plane(intel_plane) && | ||
4964 | fb && fb->format->format == DRM_FORMAT_NV12) | ||
4965 | need_scaler = true; | ||
5006 | 4966 | ||
5007 | ret = skl_update_scaler(crtc_state, force_detach, | 4967 | ret = skl_update_scaler(crtc_state, force_detach, |
5008 | drm_plane_index(&intel_plane->base), | 4968 | drm_plane_index(&intel_plane->base), |
@@ -5011,7 +4971,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, | |||
5011 | drm_rect_height(&plane_state->base.src) >> 16, | 4971 | drm_rect_height(&plane_state->base.src) >> 16, |
5012 | drm_rect_width(&plane_state->base.dst), | 4972 | drm_rect_width(&plane_state->base.dst), |
5013 | drm_rect_height(&plane_state->base.dst), | 4973 | drm_rect_height(&plane_state->base.dst), |
5014 | fb ? true : false, fb ? fb->format->format : 0); | 4974 | fb ? fb->format : NULL, need_scaler); |
5015 | 4975 | ||
5016 | if (ret || plane_state->scaler_id < 0) | 4976 | if (ret || plane_state->scaler_id < 0) |
5017 | return ret; | 4977 | return ret; |
@@ -5057,23 +5017,30 @@ static void skylake_scaler_disable(struct intel_crtc *crtc) | |||
5057 | skl_detach_scaler(crtc, i); | 5017 | skl_detach_scaler(crtc, i); |
5058 | } | 5018 | } |
5059 | 5019 | ||
5060 | static void skylake_pfit_enable(struct intel_crtc *crtc) | 5020 | static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state) |
5061 | { | 5021 | { |
5062 | struct drm_device *dev = crtc->base.dev; | 5022 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); |
5063 | struct drm_i915_private *dev_priv = to_i915(dev); | 5023 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
5064 | int pipe = crtc->pipe; | 5024 | enum pipe pipe = crtc->pipe; |
5065 | struct intel_crtc_scaler_state *scaler_state = | 5025 | const struct intel_crtc_scaler_state *scaler_state = |
5066 | &crtc->config->scaler_state; | 5026 | &crtc_state->scaler_state; |
5067 | 5027 | ||
5068 | if (crtc->config->pch_pfit.enabled) { | 5028 | if (crtc_state->pch_pfit.enabled) { |
5069 | u16 uv_rgb_hphase, uv_rgb_vphase; | 5029 | u16 uv_rgb_hphase, uv_rgb_vphase; |
5030 | int pfit_w, pfit_h, hscale, vscale; | ||
5070 | int id; | 5031 | int id; |
5071 | 5032 | ||
5072 | if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) | 5033 | if (WARN_ON(crtc_state->scaler_state.scaler_id < 0)) |
5073 | return; | 5034 | return; |
5074 | 5035 | ||
5075 | uv_rgb_hphase = skl_scaler_calc_phase(1, false); | 5036 | pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF; |
5076 | uv_rgb_vphase = skl_scaler_calc_phase(1, false); | 5037 | pfit_h = crtc_state->pch_pfit.size & 0xFFFF; |
5038 | |||
5039 | hscale = (crtc_state->pipe_src_w << 16) / pfit_w; | ||
5040 | vscale = (crtc_state->pipe_src_h << 16) / pfit_h; | ||
5041 | |||
5042 | uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); | ||
5043 | uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); | ||
5077 | 5044 | ||
5078 | id = scaler_state->scaler_id; | 5045 | id = scaler_state->scaler_id; |
5079 | I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | | 5046 | I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | |
@@ -5082,18 +5049,18 @@ static void skylake_pfit_enable(struct intel_crtc *crtc) | |||
5082 | PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); | 5049 | PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase)); |
5083 | I915_WRITE_FW(SKL_PS_HPHASE(pipe, id), | 5050 | I915_WRITE_FW(SKL_PS_HPHASE(pipe, id), |
5084 | PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); | 5051 | PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase)); |
5085 | I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos); | 5052 | I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos); |
5086 | I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size); | 5053 | I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size); |
5087 | } | 5054 | } |
5088 | } | 5055 | } |
5089 | 5056 | ||
5090 | static void ironlake_pfit_enable(struct intel_crtc *crtc) | 5057 | static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state) |
5091 | { | 5058 | { |
5092 | struct drm_device *dev = crtc->base.dev; | 5059 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); |
5093 | struct drm_i915_private *dev_priv = to_i915(dev); | 5060 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
5094 | int pipe = crtc->pipe; | 5061 | int pipe = crtc->pipe; |
5095 | 5062 | ||
5096 | if (crtc->config->pch_pfit.enabled) { | 5063 | if (crtc_state->pch_pfit.enabled) { |
5097 | /* Force use of hard-coded filter coefficients | 5064 | /* Force use of hard-coded filter coefficients |
5098 | * as some pre-programmed values are broken, | 5065 | * as some pre-programmed values are broken, |
5099 | * e.g. x201. | 5066 | * e.g. x201. |
@@ -5103,8 +5070,8 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc) | |||
5103 | PF_PIPE_SEL_IVB(pipe)); | 5070 | PF_PIPE_SEL_IVB(pipe)); |
5104 | else | 5071 | else |
5105 | I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); | 5072 | I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); |
5106 | I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos); | 5073 | I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos); |
5107 | I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size); | 5074 | I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size); |
5108 | } | 5075 | } |
5109 | } | 5076 | } |
5110 | 5077 | ||
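
For skylake_pfit_enable above, the scale factors now come from the ratio of the pipe source size to the panel-fitter window rather than being hardcoded. A worked example under assumed numbers (a 1280x720 source fitted to a 1920x1080 window; the hi/lo packing of pch_pfit.size follows the code above):

	#include <stdio.h>

	int main(void)
	{
		unsigned int pch_pfit_size = (1920 << 16) | 1080;	/* assumed window */
		int pipe_src_w = 1280, pipe_src_h = 720;		/* assumed source */

		int pfit_w = (pch_pfit_size >> 16) & 0xFFFF;		/* 1920 */
		int pfit_h = pch_pfit_size & 0xFFFF;			/* 1080 */

		/* 16.16 ratios fed into skl_scaler_calc_phase(). */
		int hscale = (pipe_src_w << 16) / pfit_w;		/* 0xaaaa ~ 0.667 */
		int vscale = (pipe_src_h << 16) / pfit_h;		/* 0xaaaa ~ 0.667 */

		printf("hscale=%#x vscale=%#x\n", hscale, vscale);
		return 0;
	}

A ratio below 1.0 means the fitter is upscaling, which per the phase formula pulls the initial phase from 0 back toward -0.5.
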
@@ -5299,11 +5266,8 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv, | |||
5299 | if (!crtc_state->nv12_planes) | 5266 | if (!crtc_state->nv12_planes) |
5300 | return false; | 5267 | return false; |
5301 | 5268 | ||
5302 | if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) | 5269 | /* WA Display #0827: Gen9:all */ |
5303 | return false; | 5270 | if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) |
5304 | |||
5305 | if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) || | ||
5306 | IS_CANNONLAKE(dev_priv)) | ||
5307 | return true; | 5271 | return true; |
5308 | 5272 | ||
5309 | return false; | 5273 | return false; |
@@ -5346,7 +5310,6 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) | |||
5346 | if (needs_nv12_wa(dev_priv, old_crtc_state) && | 5310 | if (needs_nv12_wa(dev_priv, old_crtc_state) && |
5347 | !needs_nv12_wa(dev_priv, pipe_config)) { | 5311 | !needs_nv12_wa(dev_priv, pipe_config)) { |
5348 | skl_wa_clkgate(dev_priv, crtc->pipe, false); | 5312 | skl_wa_clkgate(dev_priv, crtc->pipe, false); |
5349 | skl_wa_528(dev_priv, crtc->pipe, false); | ||
5350 | } | 5313 | } |
5351 | } | 5314 | } |
5352 | 5315 | ||
@@ -5386,7 +5349,6 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, | |||
5386 | if (!needs_nv12_wa(dev_priv, old_crtc_state) && | 5349 | if (!needs_nv12_wa(dev_priv, old_crtc_state) && |
5387 | needs_nv12_wa(dev_priv, pipe_config)) { | 5350 | needs_nv12_wa(dev_priv, pipe_config)) { |
5388 | skl_wa_clkgate(dev_priv, crtc->pipe, true); | 5351 | skl_wa_clkgate(dev_priv, crtc->pipe, true); |
5389 | skl_wa_528(dev_priv, crtc->pipe, true); | ||
5390 | } | 5352 | } |
5391 | 5353 | ||
5392 | /* | 5354 | /* |
@@ -5409,7 +5371,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, | |||
5409 | * | 5371 | * |
5410 | * WaCxSRDisabledForSpriteScaling:ivb | 5372 | * WaCxSRDisabledForSpriteScaling:ivb |
5411 | */ | 5373 | */ |
5412 | if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev)) | 5374 | if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) && |
5375 | old_crtc_state->base.active) | ||
5413 | intel_wait_for_vblank(dev_priv, crtc->pipe); | 5376 | intel_wait_for_vblank(dev_priv, crtc->pipe); |
5414 | 5377 | ||
5415 | /* | 5378 | /* |
@@ -5440,24 +5403,23 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, | |||
5440 | intel_update_watermarks(crtc); | 5403 | intel_update_watermarks(crtc); |
5441 | } | 5404 | } |
5442 | 5405 | ||
5443 | static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask) | 5406 | static void intel_crtc_disable_planes(struct intel_crtc *crtc, unsigned plane_mask) |
5444 | { | 5407 | { |
5445 | struct drm_device *dev = crtc->dev; | 5408 | struct drm_device *dev = crtc->base.dev; |
5446 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5409 | struct intel_plane *plane; |
5447 | struct drm_plane *p; | 5410 | unsigned fb_bits = 0; |
5448 | int pipe = intel_crtc->pipe; | ||
5449 | 5411 | ||
5450 | intel_crtc_dpms_overlay_disable(intel_crtc); | 5412 | intel_crtc_dpms_overlay_disable(crtc); |
5451 | 5413 | ||
5452 | drm_for_each_plane_mask(p, dev, plane_mask) | 5414 | for_each_intel_plane_on_crtc(dev, crtc, plane) { |
5453 | to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc); | 5415 | if (plane_mask & BIT(plane->id)) { |
5416 | plane->disable_plane(plane, crtc); | ||
5454 | 5417 | ||
5455 | /* | 5418 | fb_bits |= plane->frontbuffer_bit; |
5456 | * FIXME: Once we grow proper nuclear flip support out of this we need | 5419 | } |
5457 | * to compute the mask of flip planes precisely. For the time being | 5420 | } |
5458 | * consider this a flip to a NULL plane. | 5421 | |
5459 | */ | 5422 | intel_frontbuffer_flip(to_i915(dev), fb_bits); |
5460 | intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe)); | ||
5461 | } | 5423 | } |
5462 | 5424 | ||
5463 | static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc, | 5425 | static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc, |
@@ -5515,7 +5477,8 @@ static void intel_encoders_enable(struct drm_crtc *crtc, | |||
5515 | if (conn_state->crtc != crtc) | 5477 | if (conn_state->crtc != crtc) |
5516 | continue; | 5478 | continue; |
5517 | 5479 | ||
5518 | encoder->enable(encoder, crtc_state, conn_state); | 5480 | if (encoder->enable) |
5481 | encoder->enable(encoder, crtc_state, conn_state); | ||
5519 | intel_opregion_notify_encoder(encoder, true); | 5482 | intel_opregion_notify_encoder(encoder, true); |
5520 | } | 5483 | } |
5521 | } | 5484 | } |
@@ -5536,7 +5499,8 @@ static void intel_encoders_disable(struct drm_crtc *crtc, | |||
5536 | continue; | 5499 | continue; |
5537 | 5500 | ||
5538 | intel_opregion_notify_encoder(encoder, false); | 5501 | intel_opregion_notify_encoder(encoder, false); |
5539 | encoder->disable(encoder, old_crtc_state, old_conn_state); | 5502 | if (encoder->disable) |
5503 | encoder->disable(encoder, old_crtc_state, old_conn_state); | ||
5540 | } | 5504 | } |
5541 | } | 5505 | } |
5542 | 5506 | ||
@@ -5607,37 +5571,37 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, | |||
5607 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); | 5571 | intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); |
5608 | intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); | 5572 | intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); |
5609 | 5573 | ||
5610 | if (intel_crtc->config->has_pch_encoder) | 5574 | if (pipe_config->has_pch_encoder) |
5611 | intel_prepare_shared_dpll(intel_crtc); | 5575 | intel_prepare_shared_dpll(pipe_config); |
5612 | 5576 | ||
5613 | if (intel_crtc_has_dp_encoder(intel_crtc->config)) | 5577 | if (intel_crtc_has_dp_encoder(pipe_config)) |
5614 | intel_dp_set_m_n(intel_crtc, M1_N1); | 5578 | intel_dp_set_m_n(pipe_config, M1_N1); |
5615 | 5579 | ||
5616 | intel_set_pipe_timings(intel_crtc); | 5580 | intel_set_pipe_timings(pipe_config); |
5617 | intel_set_pipe_src_size(intel_crtc); | 5581 | intel_set_pipe_src_size(pipe_config); |
5618 | 5582 | ||
5619 | if (intel_crtc->config->has_pch_encoder) { | 5583 | if (pipe_config->has_pch_encoder) { |
5620 | intel_cpu_transcoder_set_m_n(intel_crtc, | 5584 | intel_cpu_transcoder_set_m_n(pipe_config, |
5621 | &intel_crtc->config->fdi_m_n, NULL); | 5585 | &pipe_config->fdi_m_n, NULL); |
5622 | } | 5586 | } |
5623 | 5587 | ||
5624 | ironlake_set_pipeconf(crtc); | 5588 | ironlake_set_pipeconf(pipe_config); |
5625 | 5589 | ||
5626 | intel_crtc->active = true; | 5590 | intel_crtc->active = true; |
5627 | 5591 | ||
5628 | intel_encoders_pre_enable(crtc, pipe_config, old_state); | 5592 | intel_encoders_pre_enable(crtc, pipe_config, old_state); |
5629 | 5593 | ||
5630 | if (intel_crtc->config->has_pch_encoder) { | 5594 | if (pipe_config->has_pch_encoder) { |
5631 | /* Note: FDI PLL enabling _must_ be done before we enable the | 5595 | /* Note: FDI PLL enabling _must_ be done before we enable the |
5632 | * cpu pipes, hence this is separate from all the other fdi/pch | 5596 | * cpu pipes, hence this is separate from all the other fdi/pch |
5633 | * enabling. */ | 5597 | * enabling. */ |
5634 | ironlake_fdi_pll_enable(intel_crtc); | 5598 | ironlake_fdi_pll_enable(pipe_config); |
5635 | } else { | 5599 | } else { |
5636 | assert_fdi_tx_disabled(dev_priv, pipe); | 5600 | assert_fdi_tx_disabled(dev_priv, pipe); |
5637 | assert_fdi_rx_disabled(dev_priv, pipe); | 5601 | assert_fdi_rx_disabled(dev_priv, pipe); |
5638 | } | 5602 | } |
5639 | 5603 | ||
5640 | ironlake_pfit_enable(intel_crtc); | 5604 | ironlake_pfit_enable(pipe_config); |
5641 | 5605 | ||
5642 | /* | 5606 | /* |
5643 | * On ILK+ LUT must be loaded before the pipe is running but with | 5607 | * On ILK+ LUT must be loaded before the pipe is running but with |
@@ -5646,10 +5610,10 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, | |||
5646 | intel_color_load_luts(&pipe_config->base); | 5610 | intel_color_load_luts(&pipe_config->base); |
5647 | 5611 | ||
5648 | if (dev_priv->display.initial_watermarks != NULL) | 5612 | if (dev_priv->display.initial_watermarks != NULL) |
5649 | dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config); | 5613 | dev_priv->display.initial_watermarks(old_intel_state, pipe_config); |
5650 | intel_enable_pipe(pipe_config); | 5614 | intel_enable_pipe(pipe_config); |
5651 | 5615 | ||
5652 | if (intel_crtc->config->has_pch_encoder) | 5616 | if (pipe_config->has_pch_encoder) |
5653 | ironlake_pch_enable(old_intel_state, pipe_config); | 5617 | ironlake_pch_enable(old_intel_state, pipe_config); |
5654 | 5618 | ||
5655 | assert_vblank_disabled(crtc); | 5619 | assert_vblank_disabled(crtc); |
@@ -5666,7 +5630,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, | |||
5666 | * some interlaced HDMI modes. Let's do the double wait always | 5630 | * some interlaced HDMI modes. Let's do the double wait always |
5667 | * in case there are more corner cases we don't know about. | 5631 | * in case there are more corner cases we don't know about. |
5668 | */ | 5632 | */ |
5669 | if (intel_crtc->config->has_pch_encoder) { | 5633 | if (pipe_config->has_pch_encoder) { |
5670 | intel_wait_for_vblank(dev_priv, pipe); | 5634 | intel_wait_for_vblank(dev_priv, pipe); |
5671 | intel_wait_for_vblank(dev_priv, pipe); | 5635 | intel_wait_for_vblank(dev_priv, pipe); |
5672 | } | 5636 | } |
@@ -5700,10 +5664,9 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc) | |||
5700 | enum pipe pipe = crtc->pipe; | 5664 | enum pipe pipe = crtc->pipe; |
5701 | uint32_t val; | 5665 | uint32_t val; |
5702 | 5666 | ||
5703 | val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2); | 5667 | val = MBUS_DBOX_A_CREDIT(2); |
5704 | 5668 | val |= MBUS_DBOX_BW_CREDIT(1); | |
5705 | /* Program B credit equally to all pipes */ | 5669 | val |= MBUS_DBOX_B_CREDIT(8); |
5706 | val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes); | ||
5707 | 5670 | ||
5708 | I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val); | 5671 | I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val); |
5709 | } | 5672 | } |
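
Worth noting (an observation, not stated in the patch): with the usual three display pipes on this platform, the old per-pipe split worked out to 24 / 3 = 8 credits, so the fixed MBUS_DBOX_B_CREDIT(8) matches the previous value when all pipes are present and simply stops varying when pipes are fused off.
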
@@ -5715,7 +5678,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, | |||
5715 | struct drm_i915_private *dev_priv = to_i915(crtc->dev); | 5678 | struct drm_i915_private *dev_priv = to_i915(crtc->dev); |
5716 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 5679 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5717 | int pipe = intel_crtc->pipe, hsw_workaround_pipe; | 5680 | int pipe = intel_crtc->pipe, hsw_workaround_pipe; |
5718 | enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; | 5681 | enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; |
5719 | struct intel_atomic_state *old_intel_state = | 5682 | struct intel_atomic_state *old_intel_state = |
5720 | to_intel_atomic_state(old_state); | 5683 | to_intel_atomic_state(old_state); |
5721 | bool psl_clkgate_wa; | 5684 | bool psl_clkgate_wa; |
@@ -5726,37 +5689,37 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, | |||
5726 | 5689 | ||
5727 | intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); | 5690 | intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); |
5728 | 5691 | ||
5729 | if (intel_crtc->config->shared_dpll) | 5692 | if (pipe_config->shared_dpll) |
5730 | intel_enable_shared_dpll(intel_crtc); | 5693 | intel_enable_shared_dpll(pipe_config); |
5731 | 5694 | ||
5732 | if (INTEL_GEN(dev_priv) >= 11) | 5695 | if (INTEL_GEN(dev_priv) >= 11) |
5733 | icl_map_plls_to_ports(crtc, pipe_config, old_state); | 5696 | icl_map_plls_to_ports(crtc, pipe_config, old_state); |
5734 | 5697 | ||
5735 | intel_encoders_pre_enable(crtc, pipe_config, old_state); | 5698 | intel_encoders_pre_enable(crtc, pipe_config, old_state); |
5736 | 5699 | ||
5737 | if (intel_crtc_has_dp_encoder(intel_crtc->config)) | 5700 | if (intel_crtc_has_dp_encoder(pipe_config)) |
5738 | intel_dp_set_m_n(intel_crtc, M1_N1); | 5701 | intel_dp_set_m_n(pipe_config, M1_N1); |
5739 | 5702 | ||
5740 | if (!transcoder_is_dsi(cpu_transcoder)) | 5703 | if (!transcoder_is_dsi(cpu_transcoder)) |
5741 | intel_set_pipe_timings(intel_crtc); | 5704 | intel_set_pipe_timings(pipe_config); |
5742 | 5705 | ||
5743 | intel_set_pipe_src_size(intel_crtc); | 5706 | intel_set_pipe_src_size(pipe_config); |
5744 | 5707 | ||
5745 | if (cpu_transcoder != TRANSCODER_EDP && | 5708 | if (cpu_transcoder != TRANSCODER_EDP && |
5746 | !transcoder_is_dsi(cpu_transcoder)) { | 5709 | !transcoder_is_dsi(cpu_transcoder)) { |
5747 | I915_WRITE(PIPE_MULT(cpu_transcoder), | 5710 | I915_WRITE(PIPE_MULT(cpu_transcoder), |
5748 | intel_crtc->config->pixel_multiplier - 1); | 5711 | pipe_config->pixel_multiplier - 1); |
5749 | } | 5712 | } |
5750 | 5713 | ||
5751 | if (intel_crtc->config->has_pch_encoder) { | 5714 | if (pipe_config->has_pch_encoder) { |
5752 | intel_cpu_transcoder_set_m_n(intel_crtc, | 5715 | intel_cpu_transcoder_set_m_n(pipe_config, |
5753 | &intel_crtc->config->fdi_m_n, NULL); | 5716 | &pipe_config->fdi_m_n, NULL); |
5754 | } | 5717 | } |
5755 | 5718 | ||
5756 | if (!transcoder_is_dsi(cpu_transcoder)) | 5719 | if (!transcoder_is_dsi(cpu_transcoder)) |
5757 | haswell_set_pipeconf(crtc); | 5720 | haswell_set_pipeconf(pipe_config); |
5758 | 5721 | ||
5759 | haswell_set_pipemisc(crtc); | 5722 | haswell_set_pipemisc(pipe_config); |
5760 | 5723 | ||
5761 | intel_color_set_csc(&pipe_config->base); | 5724 | intel_color_set_csc(&pipe_config->base); |
5762 | 5725 | ||
@@ -5764,14 +5727,14 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, | |||
5764 | 5727 | ||
5765 | /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */ | 5728 | /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */ |
5766 | psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && | 5729 | psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && |
5767 | intel_crtc->config->pch_pfit.enabled; | 5730 | pipe_config->pch_pfit.enabled; |
5768 | if (psl_clkgate_wa) | 5731 | if (psl_clkgate_wa) |
5769 | glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); | 5732 | glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); |
5770 | 5733 | ||
5771 | if (INTEL_GEN(dev_priv) >= 9) | 5734 | if (INTEL_GEN(dev_priv) >= 9) |
5772 | skylake_pfit_enable(intel_crtc); | 5735 | skylake_pfit_enable(pipe_config); |
5773 | else | 5736 | else |
5774 | ironlake_pfit_enable(intel_crtc); | 5737 | ironlake_pfit_enable(pipe_config); |
5775 | 5738 | ||
5776 | /* | 5739 | /* |
5777 | * On ILK+ LUT must be loaded before the pipe is running but with | 5740 | * On ILK+ LUT must be loaded before the pipe is running but with |
@@ -5804,10 +5767,10 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, | |||
5804 | if (!transcoder_is_dsi(cpu_transcoder)) | 5767 | if (!transcoder_is_dsi(cpu_transcoder)) |
5805 | intel_enable_pipe(pipe_config); | 5768 | intel_enable_pipe(pipe_config); |
5806 | 5769 | ||
5807 | if (intel_crtc->config->has_pch_encoder) | 5770 | if (pipe_config->has_pch_encoder) |
5808 | lpt_pch_enable(old_intel_state, pipe_config); | 5771 | lpt_pch_enable(old_intel_state, pipe_config); |
5809 | 5772 | ||
5810 | if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST)) | 5773 | if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) |
5811 | intel_ddi_set_vc_payload_alloc(pipe_config, true); | 5774 | intel_ddi_set_vc_payload_alloc(pipe_config, true); |
5812 | 5775 | ||
5813 | assert_vblank_disabled(crtc); | 5776 | assert_vblank_disabled(crtc); |
@@ -5829,15 +5792,15 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, | |||
5829 | } | 5792 | } |
5830 | } | 5793 | } |
5831 | 5794 | ||
5832 | static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force) | 5795 | static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state) |
5833 | { | 5796 | { |
5834 | struct drm_device *dev = crtc->base.dev; | 5797 | struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); |
5835 | struct drm_i915_private *dev_priv = to_i915(dev); | 5798 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
5836 | int pipe = crtc->pipe; | 5799 | enum pipe pipe = crtc->pipe; |
5837 | 5800 | ||
5838 | /* To avoid upsetting the power well on haswell only disable the pfit if | 5801 | /* To avoid upsetting the power well on haswell only disable the pfit if |
5839 | * it's in use. The hw state code will make sure we get this right. */ | 5802 | * it's in use. The hw state code will make sure we get this right. */ |
5840 | if (force || crtc->config->pch_pfit.enabled) { | 5803 | if (old_crtc_state->pch_pfit.enabled) { |
5841 | I915_WRITE(PF_CTL(pipe), 0); | 5804 | I915_WRITE(PF_CTL(pipe), 0); |
5842 | I915_WRITE(PF_WIN_POS(pipe), 0); | 5805 | I915_WRITE(PF_WIN_POS(pipe), 0); |
5843 | I915_WRITE(PF_WIN_SZ(pipe), 0); | 5806 | I915_WRITE(PF_WIN_SZ(pipe), 0); |
@@ -5868,14 +5831,14 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, | |||
5868 | 5831 | ||
5869 | intel_disable_pipe(old_crtc_state); | 5832 | intel_disable_pipe(old_crtc_state); |
5870 | 5833 | ||
5871 | ironlake_pfit_disable(intel_crtc, false); | 5834 | ironlake_pfit_disable(old_crtc_state); |
5872 | 5835 | ||
5873 | if (intel_crtc->config->has_pch_encoder) | 5836 | if (old_crtc_state->has_pch_encoder) |
5874 | ironlake_fdi_disable(crtc); | 5837 | ironlake_fdi_disable(crtc); |
5875 | 5838 | ||
5876 | intel_encoders_post_disable(crtc, old_crtc_state, old_state); | 5839 | intel_encoders_post_disable(crtc, old_crtc_state, old_state); |
5877 | 5840 | ||
5878 | if (intel_crtc->config->has_pch_encoder) { | 5841 | if (old_crtc_state->has_pch_encoder) { |
5879 | ironlake_disable_pch_transcoder(dev_priv, pipe); | 5842 | ironlake_disable_pch_transcoder(dev_priv, pipe); |
5880 | 5843 | ||
5881 | if (HAS_PCH_CPT(dev_priv)) { | 5844 | if (HAS_PCH_CPT(dev_priv)) { |
@@ -5929,21 +5892,22 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, | |||
5929 | if (INTEL_GEN(dev_priv) >= 9) | 5892 | if (INTEL_GEN(dev_priv) >= 9) |
5930 | skylake_scaler_disable(intel_crtc); | 5893 | skylake_scaler_disable(intel_crtc); |
5931 | else | 5894 | else |
5932 | ironlake_pfit_disable(intel_crtc, false); | 5895 | ironlake_pfit_disable(old_crtc_state); |
5933 | 5896 | ||
5934 | intel_encoders_post_disable(crtc, old_crtc_state, old_state); | 5897 | intel_encoders_post_disable(crtc, old_crtc_state, old_state); |
5935 | 5898 | ||
5936 | if (INTEL_GEN(dev_priv) >= 11) | 5899 | if (INTEL_GEN(dev_priv) >= 11) |
5937 | icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state); | 5900 | icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state); |
5901 | |||
5902 | intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state); | ||
5938 | } | 5903 | } |
5939 | 5904 | ||
5940 | static void i9xx_pfit_enable(struct intel_crtc *crtc) | 5905 | static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) |
5941 | { | 5906 | { |
5942 | struct drm_device *dev = crtc->base.dev; | 5907 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); |
5943 | struct drm_i915_private *dev_priv = to_i915(dev); | 5908 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
5944 | struct intel_crtc_state *pipe_config = crtc->config; | ||
5945 | 5909 | ||
5946 | if (!pipe_config->gmch_pfit.control) | 5910 | if (!crtc_state->gmch_pfit.control) |
5947 | return; | 5911 | return; |
5948 | 5912 | ||
5949 | /* | 5913 | /* |
@@ -5953,8 +5917,8 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc) | |||
5953 | WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE); | 5917 | WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE); |
5954 | assert_pipe_disabled(dev_priv, crtc->pipe); | 5918 | assert_pipe_disabled(dev_priv, crtc->pipe); |
5955 | 5919 | ||
5956 | I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios); | 5920 | I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios); |
5957 | I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control); | 5921 | I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control); |
5958 | 5922 | ||
5959 | /* Border color in case we don't scale up to the full screen. Black by | 5923 | /* Border color in case we don't scale up to the full screen. Black by |
5960 | * default, change to something else for debugging. */ | 5924 | * default, change to something else for debugging. */ |
@@ -6009,6 +5973,28 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port) | |||
6009 | } | 5973 | } |
6010 | } | 5974 | } |
6011 | 5975 | ||
5976 | enum intel_display_power_domain | ||
5977 | intel_aux_power_domain(struct intel_digital_port *dig_port) | ||
5978 | { | ||
5979 | switch (dig_port->aux_ch) { | ||
5980 | case AUX_CH_A: | ||
5981 | return POWER_DOMAIN_AUX_A; | ||
5982 | case AUX_CH_B: | ||
5983 | return POWER_DOMAIN_AUX_B; | ||
5984 | case AUX_CH_C: | ||
5985 | return POWER_DOMAIN_AUX_C; | ||
5986 | case AUX_CH_D: | ||
5987 | return POWER_DOMAIN_AUX_D; | ||
5988 | case AUX_CH_E: | ||
5989 | return POWER_DOMAIN_AUX_E; | ||
5990 | case AUX_CH_F: | ||
5991 | return POWER_DOMAIN_AUX_F; | ||
5992 | default: | ||
5993 | MISSING_CASE(dig_port->aux_ch); | ||
5994 | return POWER_DOMAIN_AUX_A; | ||
5995 | } | ||
5996 | } | ||
5997 | |||
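The new intel_aux_power_domain() centralizes the AUX-channel-to-power-domain mapping that callers previously open-coded per port. A hedged usage sketch; intel_display_power_get()/put() are the existing display power API, while example_aux_xfer() and do_aux_transfer() are stand-ins:

static u32 example_aux_xfer(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum intel_display_power_domain domain = intel_aux_power_domain(dig_port);
	u32 ret;

	/* Hold the AUX power well up for the duration of the transfer. */
	intel_display_power_get(dev_priv, domain);
	ret = do_aux_transfer(dig_port);	/* hypothetical transfer helper */
	intel_display_power_put(dev_priv, domain);

	return ret;
}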
6012 | static u64 get_crtc_power_domains(struct drm_crtc *crtc, | 5998 | static u64 get_crtc_power_domains(struct drm_crtc *crtc, |
6013 | struct intel_crtc_state *crtc_state) | 5999 | struct intel_crtc_state *crtc_state) |
6014 | { | 6000 | { |
@@ -6088,20 +6074,18 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, | |||
6088 | if (WARN_ON(intel_crtc->active)) | 6074 | if (WARN_ON(intel_crtc->active)) |
6089 | return; | 6075 | return; |
6090 | 6076 | ||
6091 | if (intel_crtc_has_dp_encoder(intel_crtc->config)) | 6077 | if (intel_crtc_has_dp_encoder(pipe_config)) |
6092 | intel_dp_set_m_n(intel_crtc, M1_N1); | 6078 | intel_dp_set_m_n(pipe_config, M1_N1); |
6093 | 6079 | ||
6094 | intel_set_pipe_timings(intel_crtc); | 6080 | intel_set_pipe_timings(pipe_config); |
6095 | intel_set_pipe_src_size(intel_crtc); | 6081 | intel_set_pipe_src_size(pipe_config); |
6096 | 6082 | ||
6097 | if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { | 6083 | if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { |
6098 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
6099 | |||
6100 | I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); | 6084 | I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); |
6101 | I915_WRITE(CHV_CANVAS(pipe), 0); | 6085 | I915_WRITE(CHV_CANVAS(pipe), 0); |
6102 | } | 6086 | } |
6103 | 6087 | ||
6104 | i9xx_set_pipeconf(intel_crtc); | 6088 | i9xx_set_pipeconf(pipe_config); |
6105 | 6089 | ||
6106 | intel_color_set_csc(&pipe_config->base); | 6090 | intel_color_set_csc(&pipe_config->base); |
6107 | 6091 | ||
@@ -6112,16 +6096,16 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, | |||
6112 | intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); | 6096 | intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); |
6113 | 6097 | ||
6114 | if (IS_CHERRYVIEW(dev_priv)) { | 6098 | if (IS_CHERRYVIEW(dev_priv)) { |
6115 | chv_prepare_pll(intel_crtc, intel_crtc->config); | 6099 | chv_prepare_pll(intel_crtc, pipe_config); |
6116 | chv_enable_pll(intel_crtc, intel_crtc->config); | 6100 | chv_enable_pll(intel_crtc, pipe_config); |
6117 | } else { | 6101 | } else { |
6118 | vlv_prepare_pll(intel_crtc, intel_crtc->config); | 6102 | vlv_prepare_pll(intel_crtc, pipe_config); |
6119 | vlv_enable_pll(intel_crtc, intel_crtc->config); | 6103 | vlv_enable_pll(intel_crtc, pipe_config); |
6120 | } | 6104 | } |
6121 | 6105 | ||
6122 | intel_encoders_pre_enable(crtc, pipe_config, old_state); | 6106 | intel_encoders_pre_enable(crtc, pipe_config, old_state); |
6123 | 6107 | ||
6124 | i9xx_pfit_enable(intel_crtc); | 6108 | i9xx_pfit_enable(pipe_config); |
6125 | 6109 | ||
6126 | intel_color_load_luts(&pipe_config->base); | 6110 | intel_color_load_luts(&pipe_config->base); |
6127 | 6111 | ||
@@ -6135,13 +6119,13 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, | |||
6135 | intel_encoders_enable(crtc, pipe_config, old_state); | 6119 | intel_encoders_enable(crtc, pipe_config, old_state); |
6136 | } | 6120 | } |
6137 | 6121 | ||
6138 | static void i9xx_set_pll_dividers(struct intel_crtc *crtc) | 6122 | static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state) |
6139 | { | 6123 | { |
6140 | struct drm_device *dev = crtc->base.dev; | 6124 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); |
6141 | struct drm_i915_private *dev_priv = to_i915(dev); | 6125 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
6142 | 6126 | ||
6143 | I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0); | 6127 | I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0); |
6144 | I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1); | 6128 | I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1); |
6145 | } | 6129 | } |
6146 | 6130 | ||
6147 | static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, | 6131 | static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, |
@@ -6158,15 +6142,15 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, | |||
6158 | if (WARN_ON(intel_crtc->active)) | 6142 | if (WARN_ON(intel_crtc->active)) |
6159 | return; | 6143 | return; |
6160 | 6144 | ||
6161 | i9xx_set_pll_dividers(intel_crtc); | 6145 | i9xx_set_pll_dividers(pipe_config); |
6162 | 6146 | ||
6163 | if (intel_crtc_has_dp_encoder(intel_crtc->config)) | 6147 | if (intel_crtc_has_dp_encoder(pipe_config)) |
6164 | intel_dp_set_m_n(intel_crtc, M1_N1); | 6148 | intel_dp_set_m_n(pipe_config, M1_N1); |
6165 | 6149 | ||
6166 | intel_set_pipe_timings(intel_crtc); | 6150 | intel_set_pipe_timings(pipe_config); |
6167 | intel_set_pipe_src_size(intel_crtc); | 6151 | intel_set_pipe_src_size(pipe_config); |
6168 | 6152 | ||
6169 | i9xx_set_pipeconf(intel_crtc); | 6153 | i9xx_set_pipeconf(pipe_config); |
6170 | 6154 | ||
6171 | intel_crtc->active = true; | 6155 | intel_crtc->active = true; |
6172 | 6156 | ||
@@ -6177,13 +6161,13 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, | |||
6177 | 6161 | ||
6178 | i9xx_enable_pll(intel_crtc, pipe_config); | 6162 | i9xx_enable_pll(intel_crtc, pipe_config); |
6179 | 6163 | ||
6180 | i9xx_pfit_enable(intel_crtc); | 6164 | i9xx_pfit_enable(pipe_config); |
6181 | 6165 | ||
6182 | intel_color_load_luts(&pipe_config->base); | 6166 | intel_color_load_luts(&pipe_config->base); |
6183 | 6167 | ||
6184 | if (dev_priv->display.initial_watermarks != NULL) | 6168 | if (dev_priv->display.initial_watermarks != NULL) |
6185 | dev_priv->display.initial_watermarks(old_intel_state, | 6169 | dev_priv->display.initial_watermarks(old_intel_state, |
6186 | intel_crtc->config); | 6170 | pipe_config); |
6187 | else | 6171 | else |
6188 | intel_update_watermarks(intel_crtc); | 6172 | intel_update_watermarks(intel_crtc); |
6189 | intel_enable_pipe(pipe_config); | 6173 | intel_enable_pipe(pipe_config); |
@@ -6194,12 +6178,12 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, | |||
6194 | intel_encoders_enable(crtc, pipe_config, old_state); | 6178 | intel_encoders_enable(crtc, pipe_config, old_state); |
6195 | } | 6179 | } |
6196 | 6180 | ||
6197 | static void i9xx_pfit_disable(struct intel_crtc *crtc) | 6181 | static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state) |
6198 | { | 6182 | { |
6199 | struct drm_device *dev = crtc->base.dev; | 6183 | struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); |
6200 | struct drm_i915_private *dev_priv = to_i915(dev); | 6184 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
6201 | 6185 | ||
6202 | if (!crtc->config->gmch_pfit.control) | 6186 | if (!old_crtc_state->gmch_pfit.control) |
6203 | return; | 6187 | return; |
6204 | 6188 | ||
6205 | assert_pipe_disabled(dev_priv, crtc->pipe); | 6189 | assert_pipe_disabled(dev_priv, crtc->pipe); |
@@ -6232,17 +6216,17 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, | |||
6232 | 6216 | ||
6233 | intel_disable_pipe(old_crtc_state); | 6217 | intel_disable_pipe(old_crtc_state); |
6234 | 6218 | ||
6235 | i9xx_pfit_disable(intel_crtc); | 6219 | i9xx_pfit_disable(old_crtc_state); |
6236 | 6220 | ||
6237 | intel_encoders_post_disable(crtc, old_crtc_state, old_state); | 6221 | intel_encoders_post_disable(crtc, old_crtc_state, old_state); |
6238 | 6222 | ||
6239 | if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) { | 6223 | if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) { |
6240 | if (IS_CHERRYVIEW(dev_priv)) | 6224 | if (IS_CHERRYVIEW(dev_priv)) |
6241 | chv_disable_pll(dev_priv, pipe); | 6225 | chv_disable_pll(dev_priv, pipe); |
6242 | else if (IS_VALLEYVIEW(dev_priv)) | 6226 | else if (IS_VALLEYVIEW(dev_priv)) |
6243 | vlv_disable_pll(dev_priv, pipe); | 6227 | vlv_disable_pll(dev_priv, pipe); |
6244 | else | 6228 | else |
6245 | i9xx_disable_pll(intel_crtc); | 6229 | i9xx_disable_pll(old_crtc_state); |
6246 | } | 6230 | } |
6247 | 6231 | ||
6248 | intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state); | 6232 | intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state); |
@@ -6316,7 +6300,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, | |||
6316 | 6300 | ||
6317 | intel_fbc_disable(intel_crtc); | 6301 | intel_fbc_disable(intel_crtc); |
6318 | intel_update_watermarks(intel_crtc); | 6302 | intel_update_watermarks(intel_crtc); |
6319 | intel_disable_shared_dpll(intel_crtc); | 6303 | intel_disable_shared_dpll(to_intel_crtc_state(crtc->state)); |
6320 | 6304 | ||
6321 | domains = intel_crtc->enabled_power_domains; | 6305 | domains = intel_crtc->enabled_power_domains; |
6322 | for_each_power_domain(domain, domains) | 6306 | for_each_power_domain(domain, domains) |
@@ -6394,66 +6378,6 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state, | |||
6394 | } | 6378 | } |
6395 | } | 6379 | } |
6396 | 6380 | ||
6397 | int intel_connector_init(struct intel_connector *connector) | ||
6398 | { | ||
6399 | struct intel_digital_connector_state *conn_state; | ||
6400 | |||
6401 | /* | ||
6402 | * Allocate enough memory to hold intel_digital_connector_state; | ||
6403 | * this might be a few bytes too many, but for connectors that don't | ||
6404 | * need it we'll free the state and allocate a smaller one on the first | ||
6405 | * successful commit anyway. | ||
6406 | */ | ||
6407 | conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL); | ||
6408 | if (!conn_state) | ||
6409 | return -ENOMEM; | ||
6410 | |||
6411 | __drm_atomic_helper_connector_reset(&connector->base, | ||
6412 | &conn_state->base); | ||
6413 | |||
6414 | return 0; | ||
6415 | } | ||
6416 | |||
6417 | struct intel_connector *intel_connector_alloc(void) | ||
6418 | { | ||
6419 | struct intel_connector *connector; | ||
6420 | |||
6421 | connector = kzalloc(sizeof *connector, GFP_KERNEL); | ||
6422 | if (!connector) | ||
6423 | return NULL; | ||
6424 | |||
6425 | if (intel_connector_init(connector) < 0) { | ||
6426 | kfree(connector); | ||
6427 | return NULL; | ||
6428 | } | ||
6429 | |||
6430 | return connector; | ||
6431 | } | ||
6432 | |||
6433 | /* | ||
6434 | * Free the bits allocated by intel_connector_alloc. | ||
6435 | * This should only be used after intel_connector_alloc has returned | ||
6436 | * successfully, and before drm_connector_init returns successfully. | ||
6437 | * Otherwise the destroy callbacks for the connector and the state should | ||
6438 | * take care of proper cleanup/free | ||
6439 | */ | ||
6440 | void intel_connector_free(struct intel_connector *connector) | ||
6441 | { | ||
6442 | kfree(to_intel_digital_connector_state(connector->base.state)); | ||
6443 | kfree(connector); | ||
6444 | } | ||
6445 | |||
6446 | /* Simple connector->get_hw_state implementation for encoders that support only | ||
6447 | * one connector and no cloning and hence the encoder state determines the state | ||
6448 | * of the connector. */ | ||
6449 | bool intel_connector_get_hw_state(struct intel_connector *connector) | ||
6450 | { | ||
6451 | enum pipe pipe = 0; | ||
6452 | struct intel_encoder *encoder = connector->encoder; | ||
6453 | |||
6454 | return encoder->get_hw_state(encoder, &pipe); | ||
6455 | } | ||
6456 | |||
6457 | static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) | 6381 | static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) |
6458 | { | 6382 | { |
6459 | if (crtc_state->base.enable && crtc_state->has_pch_encoder) | 6383 | if (crtc_state->base.enable && crtc_state->has_pch_encoder) |
@@ -6564,6 +6488,9 @@ retry: | |||
6564 | link_bw, &pipe_config->fdi_m_n, false); | 6488 | link_bw, &pipe_config->fdi_m_n, false); |
6565 | 6489 | ||
6566 | ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); | 6490 | ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); |
6491 | if (ret == -EDEADLK) | ||
6492 | return ret; | ||
6493 | |||
6567 | if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { | 6494 | if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { |
6568 | pipe_config->pipe_bpp -= 2*3; | 6495 | pipe_config->pipe_bpp -= 2*3; |
6569 | DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n", | 6496 | DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n", |
@@ -6720,7 +6647,9 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, | |||
6720 | return -EINVAL; | 6647 | return -EINVAL; |
6721 | } | 6648 | } |
6722 | 6649 | ||
6723 | if (pipe_config->ycbcr420 && pipe_config->base.ctm) { | 6650 | if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || |
6651 | pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) && | ||
6652 | pipe_config->base.ctm) { | ||
6724 | /* | 6653 | /* |
6725 | * There is only one pipe CSC unit per pipe, and we need that | 6654 | * There is only one pipe CSC unit per pipe, and we need that |
6726 | * for output conversion from RGB->YCBCR. So if CTM is already | 6655 | * for output conversion from RGB->YCBCR. So if CTM is already |
@@ -6886,12 +6815,12 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe | |||
6886 | vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); | 6815 | vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); |
6887 | } | 6816 | } |
6888 | 6817 | ||
6889 | static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, | 6818 | static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, |
6890 | struct intel_link_m_n *m_n) | 6819 | const struct intel_link_m_n *m_n) |
6891 | { | 6820 | { |
6892 | struct drm_device *dev = crtc->base.dev; | 6821 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); |
6893 | struct drm_i915_private *dev_priv = to_i915(dev); | 6822 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
6894 | int pipe = crtc->pipe; | 6823 | enum pipe pipe = crtc->pipe; |
6895 | 6824 | ||
6896 | I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); | 6825 | I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); |
6897 | I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); | 6826 | I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); |
@@ -6899,25 +6828,39 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, | |||
6899 | I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); | 6828 | I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); |
6900 | } | 6829 | } |
6901 | 6830 | ||
6902 | static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, | 6831 | static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv, |
6903 | struct intel_link_m_n *m_n, | 6832 | enum transcoder transcoder) |
6904 | struct intel_link_m_n *m2_n2) | ||
6905 | { | 6833 | { |
6834 | if (IS_HASWELL(dev_priv)) | ||
6835 | return transcoder == TRANSCODER_EDP; | ||
6836 | |||
6837 | /* | ||
6838 | * Strictly speaking some registers are available before | ||
6839 | * gen7, but we only support DRRS on gen7+ | ||
6840 | */ | ||
6841 | return IS_GEN7(dev_priv) || IS_CHERRYVIEW(dev_priv); | ||
6842 | } | ||
6843 | |||
6844 | static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, | ||
6845 | const struct intel_link_m_n *m_n, | ||
6846 | const struct intel_link_m_n *m2_n2) | ||
6847 | { | ||
6848 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); | ||
6906 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | 6849 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
6907 | int pipe = crtc->pipe; | 6850 | enum pipe pipe = crtc->pipe; |
6908 | enum transcoder transcoder = crtc->config->cpu_transcoder; | 6851 | enum transcoder transcoder = crtc_state->cpu_transcoder; |
6909 | 6852 | ||
6910 | if (INTEL_GEN(dev_priv) >= 5) { | 6853 | if (INTEL_GEN(dev_priv) >= 5) { |
6911 | I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); | 6854 | I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); |
6912 | I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); | 6855 | I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); |
6913 | I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); | 6856 | I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); |
6914 | I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); | 6857 | I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); |
6915 | /* M2_N2 registers to be set only for gen < 8 (M2_N2 available | 6858 | /* |
6916 | * for gen < 8) and if DRRS is supported (to make sure the | 6859 | * M2_N2 registers are set only if DRRS is supported |
6917 | * registers are not unnecessarily accessed). | 6860 | * (to make sure the registers are not unnecessarily accessed). |
6918 | */ | 6861 | */ |
6919 | if (m2_n2 && (IS_CHERRYVIEW(dev_priv) || | 6862 | if (m2_n2 && crtc_state->has_drrs && |
6920 | INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) { | 6863 | transcoder_has_m2_n2(dev_priv, transcoder)) { |
6921 | I915_WRITE(PIPE_DATA_M2(transcoder), | 6864 | I915_WRITE(PIPE_DATA_M2(transcoder), |
6922 | TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); | 6865 | TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); |
6923 | I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); | 6866 | I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); |
@@ -6932,29 +6875,29 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, | |||
6932 | } | 6875 | } |
6933 | } | 6876 | } |
6934 | 6877 | ||
6935 | void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n) | 6878 | void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n) |
6936 | { | 6879 | { |
6937 | struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; | 6880 | const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; |
6938 | 6881 | ||
6939 | if (m_n == M1_N1) { | 6882 | if (m_n == M1_N1) { |
6940 | dp_m_n = &crtc->config->dp_m_n; | 6883 | dp_m_n = &crtc_state->dp_m_n; |
6941 | dp_m2_n2 = &crtc->config->dp_m2_n2; | 6884 | dp_m2_n2 = &crtc_state->dp_m2_n2; |
6942 | } else if (m_n == M2_N2) { | 6885 | } else if (m_n == M2_N2) { |
6943 | 6886 | ||
6944 | /* | 6887 | /* |
6945 | * M2_N2 registers are not supported. Hence m2_n2 divider value | 6888 | * M2_N2 registers are not supported. Hence m2_n2 divider value |
6946 | * needs to be programmed into M1_N1. | 6889 | * needs to be programmed into M1_N1. |
6947 | */ | 6890 | */ |
6948 | dp_m_n = &crtc->config->dp_m2_n2; | 6891 | dp_m_n = &crtc_state->dp_m2_n2; |
6949 | } else { | 6892 | } else { |
6950 | DRM_ERROR("Unsupported divider value\n"); | 6893 | DRM_ERROR("Unsupported divider value\n"); |
6951 | return; | 6894 | return; |
6952 | } | 6895 | } |
6953 | 6896 | ||
6954 | if (crtc->config->has_pch_encoder) | 6897 | if (crtc_state->has_pch_encoder) |
6955 | intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n); | 6898 | intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n); |
6956 | else | 6899 | else |
6957 | intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2); | 6900 | intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2); |
6958 | } | 6901 | } |
6959 | 6902 | ||
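transcoder_has_m2_n2() replaces the old open-coded gen check: Haswell only has the second divider set on the eDP transcoder, while gen7 and Cherryview expose it on every transcoder we support DRRS on. A sketch of how a DRRS refresh-rate switch would lean on intel_dp_set_m_n() (the wrapper name and the low_refresh flag are illustrative):

static void example_drrs_set_refresh(const struct intel_crtc_state *crtc_state,
				     bool low_refresh)
{
	/* The high refresh rate uses the M1/N1 dividers; the low rate
	 * uses M2/N2, which intel_dp_set_m_n() transparently programs
	 * into the M1_N1 registers on transcoders that lack dedicated
	 * M2_N2 registers. */
	intel_dp_set_m_n(crtc_state, low_refresh ? M2_N2 : M1_N1);
}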
6960 | static void vlv_compute_dpll(struct intel_crtc *crtc, | 6903 | static void vlv_compute_dpll(struct intel_crtc *crtc, |
@@ -7053,8 +6996,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc, | |||
7053 | 6996 | ||
7054 | /* Set HBR and RBR LPF coefficients */ | 6997 | /* Set HBR and RBR LPF coefficients */ |
7055 | if (pipe_config->port_clock == 162000 || | 6998 | if (pipe_config->port_clock == 162000 || |
7056 | intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) || | 6999 | intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) || |
7057 | intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) | 7000 | intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) |
7058 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), | 7001 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), |
7059 | 0x009f0003); | 7002 | 0x009f0003); |
7060 | else | 7003 | else |
@@ -7081,7 +7024,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc, | |||
7081 | 7024 | ||
7082 | coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); | 7025 | coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); |
7083 | coreclk = (coreclk & 0x0000ff00) | 0x01c00000; | 7026 | coreclk = (coreclk & 0x0000ff00) | 0x01c00000; |
7084 | if (intel_crtc_has_dp_encoder(crtc->config)) | 7027 | if (intel_crtc_has_dp_encoder(pipe_config)) |
7085 | coreclk |= 0x01000000; | 7028 | coreclk |= 0x01000000; |
7086 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); | 7029 | vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); |
7087 | 7030 | ||
@@ -7360,12 +7303,13 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc, | |||
7360 | crtc_state->dpll_hw_state.dpll = dpll; | 7303 | crtc_state->dpll_hw_state.dpll = dpll; |
7361 | } | 7304 | } |
7362 | 7305 | ||
7363 | static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) | 7306 | static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state) |
7364 | { | 7307 | { |
7365 | struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); | 7308 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); |
7366 | enum pipe pipe = intel_crtc->pipe; | 7309 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
7367 | enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; | 7310 | enum pipe pipe = crtc->pipe; |
7368 | const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; | 7311 | enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; |
7312 | const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; | ||
7369 | uint32_t crtc_vtotal, crtc_vblank_end; | 7313 | uint32_t crtc_vtotal, crtc_vblank_end; |
7370 | int vsyncshift = 0; | 7314 | int vsyncshift = 0; |
7371 | 7315 | ||
@@ -7379,7 +7323,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) | |||
7379 | crtc_vtotal -= 1; | 7323 | crtc_vtotal -= 1; |
7380 | crtc_vblank_end -= 1; | 7324 | crtc_vblank_end -= 1; |
7381 | 7325 | ||
7382 | if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) | 7326 | if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) |
7383 | vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; | 7327 | vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; |
7384 | else | 7328 | else |
7385 | vsyncshift = adjusted_mode->crtc_hsync_start - | 7329 | vsyncshift = adjusted_mode->crtc_hsync_start - |
@@ -7421,18 +7365,18 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) | |||
7421 | 7365 | ||
7422 | } | 7366 | } |
7423 | 7367 | ||
7424 | static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc) | 7368 | static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) |
7425 | { | 7369 | { |
7426 | struct drm_device *dev = intel_crtc->base.dev; | 7370 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); |
7427 | struct drm_i915_private *dev_priv = to_i915(dev); | 7371 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
7428 | enum pipe pipe = intel_crtc->pipe; | 7372 | enum pipe pipe = crtc->pipe; |
7429 | 7373 | ||
7430 | /* pipesrc controls the size that is scaled from, which should | 7374 | /* pipesrc controls the size that is scaled from, which should |
7431 | * always be the user's requested size. | 7375 | * always be the user's requested size. |
7432 | */ | 7376 | */ |
7433 | I915_WRITE(PIPESRC(pipe), | 7377 | I915_WRITE(PIPESRC(pipe), |
7434 | ((intel_crtc->config->pipe_src_w - 1) << 16) | | 7378 | ((crtc_state->pipe_src_w - 1) << 16) | |
7435 | (intel_crtc->config->pipe_src_h - 1)); | 7379 | (crtc_state->pipe_src_h - 1)); |
7436 | } | 7380 | } |
7437 | 7381 | ||
7438 | static void intel_get_pipe_timings(struct intel_crtc *crtc, | 7382 | static void intel_get_pipe_timings(struct intel_crtc *crtc, |
@@ -7508,29 +7452,30 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, | |||
7508 | drm_mode_set_name(mode); | 7452 | drm_mode_set_name(mode); |
7509 | } | 7453 | } |
7510 | 7454 | ||
7511 | static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) | 7455 | static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) |
7512 | { | 7456 | { |
7513 | struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); | 7457 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); |
7458 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | ||
7514 | uint32_t pipeconf; | 7459 | uint32_t pipeconf; |
7515 | 7460 | ||
7516 | pipeconf = 0; | 7461 | pipeconf = 0; |
7517 | 7462 | ||
7518 | /* we keep both pipes enabled on 830 */ | 7463 | /* we keep both pipes enabled on 830 */ |
7519 | if (IS_I830(dev_priv)) | 7464 | if (IS_I830(dev_priv)) |
7520 | pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE; | 7465 | pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE; |
7521 | 7466 | ||
7522 | if (intel_crtc->config->double_wide) | 7467 | if (crtc_state->double_wide) |
7523 | pipeconf |= PIPECONF_DOUBLE_WIDE; | 7468 | pipeconf |= PIPECONF_DOUBLE_WIDE; |
7524 | 7469 | ||
7525 | /* only g4x and later have fancy bpc/dither controls */ | 7470 | /* only g4x and later have fancy bpc/dither controls */ |
7526 | if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || | 7471 | if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || |
7527 | IS_CHERRYVIEW(dev_priv)) { | 7472 | IS_CHERRYVIEW(dev_priv)) { |
7528 | /* Bspec claims that we can't use dithering for 30bpp pipes. */ | 7473 | /* Bspec claims that we can't use dithering for 30bpp pipes. */ |
7529 | if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30) | 7474 | if (crtc_state->dither && crtc_state->pipe_bpp != 30) |
7530 | pipeconf |= PIPECONF_DITHER_EN | | 7475 | pipeconf |= PIPECONF_DITHER_EN | |
7531 | PIPECONF_DITHER_TYPE_SP; | 7476 | PIPECONF_DITHER_TYPE_SP; |
7532 | 7477 | ||
7533 | switch (intel_crtc->config->pipe_bpp) { | 7478 | switch (crtc_state->pipe_bpp) { |
7534 | case 18: | 7479 | case 18: |
7535 | pipeconf |= PIPECONF_6BPC; | 7480 | pipeconf |= PIPECONF_6BPC; |
7536 | break; | 7481 | break; |
@@ -7546,9 +7491,9 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) | |||
7546 | } | 7491 | } |
7547 | } | 7492 | } |
7548 | 7493 | ||
7549 | if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { | 7494 | if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { |
7550 | if (INTEL_GEN(dev_priv) < 4 || | 7495 | if (INTEL_GEN(dev_priv) < 4 || |
7551 | intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) | 7496 | intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) |
7552 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; | 7497 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; |
7553 | else | 7498 | else |
7554 | pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; | 7499 | pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; |
@@ -7556,11 +7501,11 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) | |||
7556 | pipeconf |= PIPECONF_PROGRESSIVE; | 7501 | pipeconf |= PIPECONF_PROGRESSIVE; |
7557 | 7502 | ||
7558 | if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && | 7503 | if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && |
7559 | intel_crtc->config->limited_color_range) | 7504 | crtc_state->limited_color_range) |
7560 | pipeconf |= PIPECONF_COLOR_RANGE_SELECT; | 7505 | pipeconf |= PIPECONF_COLOR_RANGE_SELECT; |
7561 | 7506 | ||
7562 | I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf); | 7507 | I915_WRITE(PIPECONF(crtc->pipe), pipeconf); |
7563 | POSTING_READ(PIPECONF(intel_crtc->pipe)); | 7508 | POSTING_READ(PIPECONF(crtc->pipe)); |
7564 | } | 7509 | } |
7565 | 7510 | ||
7566 | static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, | 7511 | static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, |
@@ -7843,8 +7788,15 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, | |||
7843 | plane_config->tiling = I915_TILING_X; | 7788 | plane_config->tiling = I915_TILING_X; |
7844 | fb->modifier = I915_FORMAT_MOD_X_TILED; | 7789 | fb->modifier = I915_FORMAT_MOD_X_TILED; |
7845 | } | 7790 | } |
7791 | |||
7792 | if (val & DISPPLANE_ROTATE_180) | ||
7793 | plane_config->rotation = DRM_MODE_ROTATE_180; | ||
7846 | } | 7794 | } |
7847 | 7795 | ||
7796 | if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B && | ||
7797 | val & DISPPLANE_MIRROR) | ||
7798 | plane_config->rotation |= DRM_MODE_REFLECT_X; | ||
7799 | |||
7848 | pixel_format = val & DISPPLANE_PIXFORMAT_MASK; | 7800 | pixel_format = val & DISPPLANE_PIXFORMAT_MASK; |
7849 | fourcc = i9xx_format_to_fourcc(pixel_format); | 7801 | fourcc = i9xx_format_to_fourcc(pixel_format); |
7850 | fb->format = drm_format_info(fourcc); | 7802 | fb->format = drm_format_info(fourcc); |
@@ -7916,6 +7868,49 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, | |||
7916 | pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); | 7868 | pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); |
7917 | } | 7869 | } |
7918 | 7870 | ||
7871 | static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc, | ||
7872 | struct intel_crtc_state *pipe_config) | ||
7873 | { | ||
7874 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | ||
7875 | enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB; | ||
7876 | |||
7877 | pipe_config->lspcon_downsampling = false; | ||
7878 | |||
7879 | if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) { | ||
7880 | u32 tmp = I915_READ(PIPEMISC(crtc->pipe)); | ||
7881 | |||
7882 | if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) { | ||
7883 | bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE; | ||
7884 | bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND; | ||
7885 | |||
7886 | if (ycbcr420_enabled) { | ||
7887 | /* We support 4:2:0 in full blend mode only */ | ||
7888 | if (!blend) | ||
7889 | output = INTEL_OUTPUT_FORMAT_INVALID; | ||
7890 | else if (!(IS_GEMINILAKE(dev_priv) || | ||
7891 | INTEL_GEN(dev_priv) >= 10)) | ||
7892 | output = INTEL_OUTPUT_FORMAT_INVALID; | ||
7893 | else | ||
7894 | output = INTEL_OUTPUT_FORMAT_YCBCR420; | ||
7895 | } else { | ||
7896 | /* | ||
7897 | * Currently there is no interface defined to | ||
7898 | * check user preference between RGB/YCBCR444 | ||
7899 | * or YCBCR420. So the only possible case for | ||
7900 | * YCBCR444 usage is driving YCBCR420 output | ||
7901 | * with LSPCON, when the pipe is configured for | ||
7902 | * YCBCR444 output and LSPCON takes care of | ||
7903 | * downsampling it. | ||
7904 | */ | ||
7905 | pipe_config->lspcon_downsampling = true; | ||
7906 | output = INTEL_OUTPUT_FORMAT_YCBCR444; | ||
7907 | } | ||
7908 | } | ||
7909 | } | ||
7910 | |||
7911 | pipe_config->output_format = output; | ||
7912 | } | ||
7913 | |||
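For reference, the readout above reduces to the following table (derived from the function body, not from Bspec):

	COLORSPACE_YUV	YUV420_ENABLE	FULL_BLEND	output_format
	0		-		-		RGB
	1		0		-		YCBCR444, lspcon_downsampling set
	1		1		0		INVALID (partial blend unsupported)
	1		1		1		YCBCR420 on GLK/gen10+, else INVALID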
7919 | static bool i9xx_get_pipe_config(struct intel_crtc *crtc, | 7914 | static bool i9xx_get_pipe_config(struct intel_crtc *crtc, |
7920 | struct intel_crtc_state *pipe_config) | 7915 | struct intel_crtc_state *pipe_config) |
7921 | { | 7916 | { |
@@ -7928,6 +7923,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, | |||
7928 | if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) | 7923 | if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) |
7929 | return false; | 7924 | return false; |
7930 | 7925 | ||
7926 | pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; | ||
7931 | pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; | 7927 | pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; |
7932 | pipe_config->shared_dpll = NULL; | 7928 | pipe_config->shared_dpll = NULL; |
7933 | 7929 | ||
@@ -8459,16 +8455,16 @@ void intel_init_pch_refclk(struct drm_i915_private *dev_priv) | |||
8459 | lpt_init_pch_refclk(dev_priv); | 8455 | lpt_init_pch_refclk(dev_priv); |
8460 | } | 8456 | } |
8461 | 8457 | ||
8462 | static void ironlake_set_pipeconf(struct drm_crtc *crtc) | 8458 | static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state) |
8463 | { | 8459 | { |
8464 | struct drm_i915_private *dev_priv = to_i915(crtc->dev); | 8460 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); |
8465 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 8461 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
8466 | int pipe = intel_crtc->pipe; | 8462 | enum pipe pipe = crtc->pipe; |
8467 | uint32_t val; | 8463 | uint32_t val; |
8468 | 8464 | ||
8469 | val = 0; | 8465 | val = 0; |
8470 | 8466 | ||
8471 | switch (intel_crtc->config->pipe_bpp) { | 8467 | switch (crtc_state->pipe_bpp) { |
8472 | case 18: | 8468 | case 18: |
8473 | val |= PIPECONF_6BPC; | 8469 | val |= PIPECONF_6BPC; |
8474 | break; | 8470 | break; |
@@ -8486,32 +8482,32 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc) | |||
8486 | BUG(); | 8482 | BUG(); |
8487 | } | 8483 | } |
8488 | 8484 | ||
8489 | if (intel_crtc->config->dither) | 8485 | if (crtc_state->dither) |
8490 | val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); | 8486 | val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); |
8491 | 8487 | ||
8492 | if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) | 8488 | if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) |
8493 | val |= PIPECONF_INTERLACED_ILK; | 8489 | val |= PIPECONF_INTERLACED_ILK; |
8494 | else | 8490 | else |
8495 | val |= PIPECONF_PROGRESSIVE; | 8491 | val |= PIPECONF_PROGRESSIVE; |
8496 | 8492 | ||
8497 | if (intel_crtc->config->limited_color_range) | 8493 | if (crtc_state->limited_color_range) |
8498 | val |= PIPECONF_COLOR_RANGE_SELECT; | 8494 | val |= PIPECONF_COLOR_RANGE_SELECT; |
8499 | 8495 | ||
8500 | I915_WRITE(PIPECONF(pipe), val); | 8496 | I915_WRITE(PIPECONF(pipe), val); |
8501 | POSTING_READ(PIPECONF(pipe)); | 8497 | POSTING_READ(PIPECONF(pipe)); |
8502 | } | 8498 | } |
8503 | 8499 | ||
8504 | static void haswell_set_pipeconf(struct drm_crtc *crtc) | 8500 | static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state) |
8505 | { | 8501 | { |
8506 | struct drm_i915_private *dev_priv = to_i915(crtc->dev); | 8502 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); |
8507 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 8503 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
8508 | enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; | 8504 | enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; |
8509 | u32 val = 0; | 8505 | u32 val = 0; |
8510 | 8506 | ||
8511 | if (IS_HASWELL(dev_priv) && intel_crtc->config->dither) | 8507 | if (IS_HASWELL(dev_priv) && crtc_state->dither) |
8512 | val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); | 8508 | val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); |
8513 | 8509 | ||
8514 | if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) | 8510 | if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) |
8515 | val |= PIPECONF_INTERLACED_ILK; | 8511 | val |= PIPECONF_INTERLACED_ILK; |
8516 | else | 8512 | else |
8517 | val |= PIPECONF_PROGRESSIVE; | 8513 | val |= PIPECONF_PROGRESSIVE; |
@@ -8520,16 +8516,15 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc) | |||
8520 | POSTING_READ(PIPECONF(cpu_transcoder)); | 8516 | POSTING_READ(PIPECONF(cpu_transcoder)); |
8521 | } | 8517 | } |
8522 | 8518 | ||
8523 | static void haswell_set_pipemisc(struct drm_crtc *crtc) | 8519 | static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state) |
8524 | { | 8520 | { |
8525 | struct drm_i915_private *dev_priv = to_i915(crtc->dev); | 8521 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); |
8526 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 8522 | struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); |
8527 | struct intel_crtc_state *config = intel_crtc->config; | ||
8528 | 8523 | ||
8529 | if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) { | 8524 | if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) { |
8530 | u32 val = 0; | 8525 | u32 val = 0; |
8531 | 8526 | ||
8532 | switch (intel_crtc->config->pipe_bpp) { | 8527 | switch (crtc_state->pipe_bpp) { |
8533 | case 18: | 8528 | case 18: |
8534 | val |= PIPEMISC_DITHER_6_BPC; | 8529 | val |= PIPEMISC_DITHER_6_BPC; |
8535 | break; | 8530 | break; |
@@ -8547,14 +8542,16 @@ static void haswell_set_pipemisc(struct drm_crtc *crtc) | |||
8547 | BUG(); | 8542 | BUG(); |
8548 | } | 8543 | } |
8549 | 8544 | ||
8550 | if (intel_crtc->config->dither) | 8545 | if (crtc_state->dither) |
8551 | val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; | 8546 | val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; |
8552 | 8547 | ||
8553 | if (config->ycbcr420) { | 8548 | if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || |
8554 | val |= PIPEMISC_OUTPUT_COLORSPACE_YUV | | 8549 | crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) |
8555 | PIPEMISC_YUV420_ENABLE | | 8550 | val |= PIPEMISC_OUTPUT_COLORSPACE_YUV; |
8551 | |||
8552 | if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) | ||
8553 | val |= PIPEMISC_YUV420_ENABLE | | ||
8556 | PIPEMISC_YUV420_MODE_FULL_BLEND; | 8554 | PIPEMISC_YUV420_MODE_FULL_BLEND; |
8557 | } | ||
8558 | 8555 | ||
8559 | I915_WRITE(PIPEMISC(intel_crtc->pipe), val); | 8556 | I915_WRITE(PIPEMISC(intel_crtc->pipe), val); |
8560 | } | 8557 | } |
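The rewritten block sets PIPEMISC_OUTPUT_COLORSPACE_YUV for both YCbCr formats and adds the 4:2:0 bits only for full-blend 4:2:0, i.e. the write-side mirror of the readout table shown earlier. Condensed (a summary, not committed code):

	/* RGB:      no YUV bits in val
	 * YCBCR444: PIPEMISC_OUTPUT_COLORSPACE_YUV
	 * YCBCR420: PIPEMISC_OUTPUT_COLORSPACE_YUV |
	 *           PIPEMISC_YUV420_ENABLE |
	 *           PIPEMISC_YUV420_MODE_FULL_BLEND
	 */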
@@ -8765,12 +8762,8 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, | |||
8765 | m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); | 8762 | m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); |
8766 | m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) | 8763 | m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) |
8767 | & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; | 8764 | & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; |
8768 | /* Read M2_N2 registers only for gen < 8 (M2_N2 available for | 8765 | |
8769 | * gen < 8) and if DRRS is supported (to make sure the | 8766 | if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) { |
8770 | * registers are not unnecessarily read). | ||
8771 | */ | ||
8772 | if (m2_n2 && INTEL_GEN(dev_priv) < 8 && | ||
8773 | crtc->config->has_drrs) { | ||
8774 | m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); | 8767 | m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); |
8775 | m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); | 8768 | m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); |
8776 | m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder)) | 8769 | m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder)) |
@@ -8913,6 +8906,29 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, | |||
8913 | goto error; | 8906 | goto error; |
8914 | } | 8907 | } |
8915 | 8908 | ||
8909 | /* | ||
8910 | * DRM_MODE_ROTATE_ is counter-clockwise to stay compatible with Xrandr, | ||
8911 | * while i915 HW rotation is clockwise; that's why the cases are swapped. | ||
8912 | */ | ||
8913 | switch (val & PLANE_CTL_ROTATE_MASK) { | ||
8914 | case PLANE_CTL_ROTATE_0: | ||
8915 | plane_config->rotation = DRM_MODE_ROTATE_0; | ||
8916 | break; | ||
8917 | case PLANE_CTL_ROTATE_90: | ||
8918 | plane_config->rotation = DRM_MODE_ROTATE_270; | ||
8919 | break; | ||
8920 | case PLANE_CTL_ROTATE_180: | ||
8921 | plane_config->rotation = DRM_MODE_ROTATE_180; | ||
8922 | break; | ||
8923 | case PLANE_CTL_ROTATE_270: | ||
8924 | plane_config->rotation = DRM_MODE_ROTATE_90; | ||
8925 | break; | ||
8926 | } | ||
8927 | |||
8928 | if (INTEL_GEN(dev_priv) >= 10 && | ||
8929 | val & PLANE_CTL_FLIP_HORIZONTAL) | ||
8930 | plane_config->rotation |= DRM_MODE_REFLECT_X; | ||
8931 | |||
8916 | base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000; | 8932 | base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000; |
8917 | plane_config->base = base; | 8933 | plane_config->base = base; |
8918 | 8934 | ||
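The rotation readout added above decodes the CCW (DRM) vs CW (hardware) mismatch; it is the inverse of the write-side mapping. A minimal sketch of that write-side swap (it mirrors the readout switch; not the committed helper):

static u32 example_skl_plane_ctl_rotate(unsigned int drm_rotation)
{
	switch (drm_rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		return PLANE_CTL_ROTATE_0;
	case DRM_MODE_ROTATE_90:	/* DRM CCW 90 == HW CW 270 */
		return PLANE_CTL_ROTATE_270;
	case DRM_MODE_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_MODE_ROTATE_270:	/* DRM CCW 270 == HW CW 90 */
		return PLANE_CTL_ROTATE_90;
	default:
		return PLANE_CTL_ROTATE_0;
	}
}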
@@ -8979,6 +8995,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, | |||
8979 | if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) | 8995 | if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) |
8980 | return false; | 8996 | return false; |
8981 | 8997 | ||
8998 | pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; | ||
8982 | pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; | 8999 | pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; |
8983 | pipe_config->shared_dpll = NULL; | 9000 | pipe_config->shared_dpll = NULL; |
8984 | 9001 | ||
@@ -9327,30 +9344,17 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv, | |||
9327 | u32 temp; | 9344 | u32 temp; |
9328 | 9345 | ||
9329 | /* TODO: TBT pll not implemented. */ | 9346 | /* TODO: TBT pll not implemented. */ |
9330 | switch (port) { | 9347 | if (intel_port_is_combophy(dev_priv, port)) { |
9331 | case PORT_A: | ||
9332 | case PORT_B: | ||
9333 | temp = I915_READ(DPCLKA_CFGCR0_ICL) & | 9348 | temp = I915_READ(DPCLKA_CFGCR0_ICL) & |
9334 | DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); | 9349 | DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); |
9335 | id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); | 9350 | id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); |
9336 | 9351 | ||
9337 | if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1)) | 9352 | if (WARN_ON(!intel_dpll_is_combophy(id))) |
9338 | return; | 9353 | return; |
9339 | break; | 9354 | } else if (intel_port_is_tc(dev_priv, port)) { |
9340 | case PORT_C: | 9355 | id = icl_port_to_mg_pll_id(port); |
9341 | id = DPLL_ID_ICL_MGPLL1; | 9356 | } else { |
9342 | break; | 9357 | WARN(1, "Invalid port %x\n", port); |
9343 | case PORT_D: | ||
9344 | id = DPLL_ID_ICL_MGPLL2; | ||
9345 | break; | ||
9346 | case PORT_E: | ||
9347 | id = DPLL_ID_ICL_MGPLL3; | ||
9348 | break; | ||
9349 | case PORT_F: | ||
9350 | id = DPLL_ID_ICL_MGPLL4; | ||
9351 | break; | ||
9352 | default: | ||
9353 | MISSING_CASE(port); | ||
9354 | return; | 9358 | return; |
9355 | } | 9359 | } |
9356 | 9360 | ||
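The per-port switch is gone in favor of the PHY-type helpers. A condensed restatement, assuming the ICL port layout implied here (combo PHY ports read their DPLL id out of DPCLKA_CFGCR0_ICL, Type-C ports map 1:1 onto the MG PLLs; example_icl_pll_id_for_port() is illustrative):

static enum intel_dpll_id
example_icl_pll_id_for_port(struct drm_i915_private *dev_priv, enum port port)
{
	if (intel_port_is_combophy(dev_priv, port)) {
		u32 temp = I915_READ(DPCLKA_CFGCR0_ICL) &
			   DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);

		return temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
	}

	if (intel_port_is_tc(dev_priv, port))
		return icl_port_to_mg_pll_id(port);

	return DPLL_ID_ICL_DPLL0;	/* real code WARNs on unknown ports */
}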
@@ -9613,27 +9617,11 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, | |||
9613 | } | 9617 | } |
9614 | 9618 | ||
9615 | intel_get_pipe_src_size(crtc, pipe_config); | 9619 | intel_get_pipe_src_size(crtc, pipe_config); |
9620 | intel_get_crtc_ycbcr_config(crtc, pipe_config); | ||
9616 | 9621 | ||
9617 | pipe_config->gamma_mode = | 9622 | pipe_config->gamma_mode = |
9618 | I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; | 9623 | I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; |
9619 | 9624 | ||
9620 | if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) { | ||
9621 | u32 tmp = I915_READ(PIPEMISC(crtc->pipe)); | ||
9622 | bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV; | ||
9623 | |||
9624 | if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) { | ||
9625 | bool blend_mode_420 = tmp & | ||
9626 | PIPEMISC_YUV420_MODE_FULL_BLEND; | ||
9627 | |||
9628 | pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE; | ||
9629 | if (pipe_config->ycbcr420 != clrspace_yuv || | ||
9630 | pipe_config->ycbcr420 != blend_mode_420) | ||
9631 | DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp); | ||
9632 | } else if (clrspace_yuv) { | ||
9633 | DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n"); | ||
9634 | } | ||
9635 | } | ||
9636 | |||
9637 | power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); | 9625 | power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); |
9638 | if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { | 9626 | if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { |
9639 | power_domain_mask |= BIT_ULL(power_domain); | 9627 | power_domain_mask |= BIT_ULL(power_domain); |
@@ -9902,8 +9890,6 @@ static void i845_update_cursor(struct intel_plane *plane, | |||
9902 | I915_WRITE_FW(CURPOS(PIPE_A), pos); | 9890 | I915_WRITE_FW(CURPOS(PIPE_A), pos); |
9903 | } | 9891 | } |
9904 | 9892 | ||
9905 | POSTING_READ_FW(CURCNTR(PIPE_A)); | ||
9906 | |||
9907 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 9893 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
9908 | } | 9894 | } |
9909 | 9895 | ||
@@ -10132,8 +10118,6 @@ static void i9xx_update_cursor(struct intel_plane *plane, | |||
10132 | I915_WRITE_FW(CURBASE(pipe), base); | 10118 | I915_WRITE_FW(CURBASE(pipe), base); |
10133 | } | 10119 | } |
10134 | 10120 | ||
10135 | POSTING_READ_FW(CURBASE(pipe)); | ||
10136 | |||
10137 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 10121 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
10138 | } | 10122 | } |
10139 | 10123 | ||
@@ -10738,14 +10722,40 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat | |||
10738 | pipe_config->fb_bits |= plane->frontbuffer_bit; | 10722 | pipe_config->fb_bits |= plane->frontbuffer_bit; |
10739 | 10723 | ||
10740 | /* | 10724 | /* |
10725 | * ILK/SNB DVSACNTR/Sprite Enable | ||
10726 | * IVB SPR_CTL/Sprite Enable | ||
10727 | * "When in Self Refresh Big FIFO mode, a write to enable the | ||
10728 | * plane will be internally buffered and delayed while Big FIFO | ||
10729 | * mode is exiting." | ||
10730 | * | ||
10731 | * Which means that enabling the sprite can take an extra frame | ||
10732 | * when we start in big FIFO mode (LP1+). Thus we need to drop | ||
10733 | * down to LP0 and wait for vblank in order to make sure the | ||
10734 | * sprite gets enabled on the next vblank after the register write. | ||
10735 | * Doing otherwise would risk enabling the sprite one frame after | ||
10736 | * we've already signalled flip completion. We can resume LP1+ | ||
10737 | * once the sprite has been enabled. | ||
10738 | * | ||
10739 | * | ||
10741 | * WaCxSRDisabledForSpriteScaling:ivb | 10740 | * WaCxSRDisabledForSpriteScaling:ivb |
10741 | * IVB SPR_SCALE/Scaling Enable | ||
10742 | * "Low Power watermarks must be disabled for at least one | ||
10743 | * frame before enabling sprite scaling, and kept disabled | ||
10744 | * until sprite scaling is disabled." | ||
10742 | * | 10745 | * |
10743 | * cstate->update_wm was already set above, so this flag will | 10746 | * ILK/SNB DVSASCALE/Scaling Enable |
10744 | * take effect when we commit and program watermarks. | 10747 | * "When in Self Refresh Big FIFO mode, scaling enable will be |
10748 | * masked off while Big FIFO mode is exiting." | ||
10749 | * | ||
10750 | * Despite the w/a only being listed for IVB we assume that | ||
10751 | * the ILK/SNB note has similar ramifications, hence we apply | ||
10752 | * the w/a on all three platforms. | ||
10745 | */ | 10753 | */ |
10746 | if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) && | 10754 | if (plane->id == PLANE_SPRITE0 && |
10747 | needs_scaling(to_intel_plane_state(plane_state)) && | 10755 | (IS_GEN5(dev_priv) || IS_GEN6(dev_priv) || |
10748 | !needs_scaling(old_plane_state)) | 10756 | IS_IVYBRIDGE(dev_priv)) && |
10757 | (turn_on || (!needs_scaling(old_plane_state) && | ||
10758 | needs_scaling(to_intel_plane_state(plane_state))))) | ||
10749 | pipe_config->disable_lp_wm = true; | 10759 | pipe_config->disable_lp_wm = true; |
10750 | 10760 | ||
10751 | return 0; | 10761 | return 0; |
@@ -10781,6 +10791,98 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state, | |||
10781 | return true; | 10791 | return true; |
10782 | } | 10792 | } |
10783 | 10793 | ||
10794 | static int icl_add_linked_planes(struct intel_atomic_state *state) | ||
10795 | { | ||
10796 | struct intel_plane *plane, *linked; | ||
10797 | struct intel_plane_state *plane_state, *linked_plane_state; | ||
10798 | int i; | ||
10799 | |||
10800 | for_each_new_intel_plane_in_state(state, plane, plane_state, i) { | ||
10801 | linked = plane_state->linked_plane; | ||
10802 | |||
10803 | if (!linked) | ||
10804 | continue; | ||
10805 | |||
10806 | linked_plane_state = intel_atomic_get_plane_state(state, linked); | ||
10807 | if (IS_ERR(linked_plane_state)) | ||
10808 | return PTR_ERR(linked_plane_state); | ||
10809 | |||
10810 | WARN_ON(linked_plane_state->linked_plane != plane); | ||
10811 | WARN_ON(linked_plane_state->slave == plane_state->slave); | ||
10812 | } | ||
10813 | |||
10814 | return 0; | ||
10815 | } | ||
10816 | |||
10817 | static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) | ||
10818 | { | ||
10819 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); | ||
10820 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | ||
10821 | struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state); | ||
10822 | struct intel_plane *plane, *linked; | ||
10823 | struct intel_plane_state *plane_state; | ||
10824 | int i; | ||
10825 | |||
10826 | if (INTEL_GEN(dev_priv) < 11) | ||
10827 | return 0; | ||
10828 | |||
10829 | /* | ||
10830 | * Destroy all old plane links and make the slave plane invisible | ||
10831 | * in the crtc_state->active_planes mask. | ||
10832 | */ | ||
10833 | for_each_new_intel_plane_in_state(state, plane, plane_state, i) { | ||
10834 | if (plane->pipe != crtc->pipe || !plane_state->linked_plane) | ||
10835 | continue; | ||
10836 | |||
10837 | plane_state->linked_plane = NULL; | ||
10838 | if (plane_state->slave && !plane_state->base.visible) | ||
10839 | crtc_state->active_planes &= ~BIT(plane->id); | ||
10840 | |||
10841 | plane_state->slave = false; | ||
10842 | } | ||
10843 | |||
10844 | if (!crtc_state->nv12_planes) | ||
10845 | return 0; | ||
10846 | |||
10847 | for_each_new_intel_plane_in_state(state, plane, plane_state, i) { | ||
10848 | struct intel_plane_state *linked_state = NULL; | ||
10849 | |||
10850 | if (plane->pipe != crtc->pipe || | ||
10851 | !(crtc_state->nv12_planes & BIT(plane->id))) | ||
10852 | continue; | ||
10853 | |||
10854 | for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) { | ||
10855 | if (!icl_is_nv12_y_plane(linked->id)) | ||
10856 | continue; | ||
10857 | |||
10858 | if (crtc_state->active_planes & BIT(linked->id)) | ||
10859 | continue; | ||
10860 | |||
10861 | linked_state = intel_atomic_get_plane_state(state, linked); | ||
10862 | if (IS_ERR(linked_state)) | ||
10863 | return PTR_ERR(linked_state); | ||
10864 | |||
10865 | break; | ||
10866 | } | ||
10867 | |||
10868 | if (!linked_state) { | ||
10869 | DRM_DEBUG_KMS("Need %d free Y planes for NV12\n", | ||
10870 | hweight8(crtc_state->nv12_planes)); | ||
10871 | |||
10872 | return -EINVAL; | ||
10873 | } | ||
10874 | |||
10875 | plane_state->linked_plane = linked; | ||
10876 | |||
10877 | linked_state->slave = true; | ||
10878 | linked_state->linked_plane = plane; | ||
10879 | crtc_state->active_planes |= BIT(linked->id); | ||
10880 | DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name); | ||
10881 | } | ||
10882 | |||
10883 | return 0; | ||
10884 | } | ||
10885 | |||
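Once icl_check_nv12_planes() has paired every NV12 plane with a free Y plane, later per-plane passes can recognize the pairing through the state. A hedged sketch of such a consumer loop (example_program_plane() is a stand-in):

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		if (plane_state->slave)
			continue;	/* programmed through its master plane */

		/* On gen11, plane_state->linked_plane (if set) is the Y
		 * plane that carries this plane's luma. */
		example_program_plane(plane, plane_state);
	}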
10784 | static int intel_crtc_atomic_check(struct drm_crtc *crtc, | 10886 | static int intel_crtc_atomic_check(struct drm_crtc *crtc, |
10785 | struct drm_crtc_state *crtc_state) | 10887 | struct drm_crtc_state *crtc_state) |
10786 | { | 10888 | { |
@@ -10789,7 +10891,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, | |||
10789 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 10891 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
10790 | struct intel_crtc_state *pipe_config = | 10892 | struct intel_crtc_state *pipe_config = |
10791 | to_intel_crtc_state(crtc_state); | 10893 | to_intel_crtc_state(crtc_state); |
10792 | struct drm_atomic_state *state = crtc_state->state; | ||
10793 | int ret; | 10894 | int ret; |
10794 | bool mode_changed = needs_modeset(crtc_state); | 10895 | bool mode_changed = needs_modeset(crtc_state); |
10795 | 10896 | ||
@@ -10826,8 +10927,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, | |||
10826 | } | 10927 | } |
10827 | } | 10928 | } |
10828 | 10929 | ||
10829 | if (dev_priv->display.compute_intermediate_wm && | 10930 | if (dev_priv->display.compute_intermediate_wm) { |
10830 | !to_intel_atomic_state(state)->skip_intermediate_wm) { | ||
10831 | if (WARN_ON(!dev_priv->display.compute_pipe_wm)) | 10931 | if (WARN_ON(!dev_priv->display.compute_pipe_wm)) |
10832 | return 0; | 10932 | return 0; |
10833 | 10933 | ||
@@ -10843,9 +10943,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, | |||
10843 | DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); | 10943 | DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); |
10844 | return ret; | 10944 | return ret; |
10845 | } | 10945 | } |
10846 | } else if (dev_priv->display.compute_intermediate_wm) { | ||
10847 | if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9) | ||
10848 | pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal; | ||
10849 | } | 10946 | } |
10850 | 10947 | ||
10851 | if (INTEL_GEN(dev_priv) >= 9) { | 10948 | if (INTEL_GEN(dev_priv) >= 9) { |
@@ -10853,6 +10950,8 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, | |||
10853 | ret = skl_update_scaler_crtc(pipe_config); | 10950 | ret = skl_update_scaler_crtc(pipe_config); |
10854 | 10951 | ||
10855 | if (!ret) | 10952 | if (!ret) |
10953 | ret = icl_check_nv12_planes(pipe_config); | ||
10954 | if (!ret) | ||
10856 | ret = skl_check_pipe_max_pixel_rate(intel_crtc, | 10955 | ret = skl_check_pipe_max_pixel_rate(intel_crtc, |
10857 | pipe_config); | 10956 | pipe_config); |
10858 | if (!ret) | 10957 | if (!ret) |
@@ -10867,8 +10966,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, | |||
10867 | } | 10966 | } |
10868 | 10967 | ||
10869 | static const struct drm_crtc_helper_funcs intel_helper_funcs = { | 10968 | static const struct drm_crtc_helper_funcs intel_helper_funcs = { |
10870 | .atomic_begin = intel_begin_crtc_commit, | ||
10871 | .atomic_flush = intel_finish_crtc_commit, | ||
10872 | .atomic_check = intel_crtc_atomic_check, | 10969 | .atomic_check = intel_crtc_atomic_check, |
10873 | }; | 10970 | }; |
10874 | 10971 | ||
@@ -10897,30 +10994,42 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) | |||
10897 | drm_connector_list_iter_end(&conn_iter); | 10994 | drm_connector_list_iter_end(&conn_iter); |
10898 | } | 10995 | } |
10899 | 10996 | ||
10900 | static void | 10997 | static int |
10901 | connected_sink_compute_bpp(struct intel_connector *connector, | 10998 | compute_sink_pipe_bpp(const struct drm_connector_state *conn_state, |
10902 | struct intel_crtc_state *pipe_config) | 10999 | struct intel_crtc_state *pipe_config) |
10903 | { | 11000 | { |
10904 | const struct drm_display_info *info = &connector->base.display_info; | 11001 | struct drm_connector *connector = conn_state->connector; |
10905 | int bpp = pipe_config->pipe_bpp; | 11002 | const struct drm_display_info *info = &connector->display_info; |
11003 | int bpp; | ||
10906 | 11004 | ||
10907 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n", | 11005 | switch (conn_state->max_bpc) { |
10908 | connector->base.base.id, | 11006 | case 6 ... 7: |
10909 | connector->base.name); | 11007 | bpp = 6 * 3; |
10910 | 11008 | break; | |
10911 | /* Don't use an invalid EDID bpc value */ | 11009 | case 8 ... 9: |
10912 | if (info->bpc != 0 && info->bpc * 3 < bpp) { | 11010 | bpp = 8 * 3; |
10913 | DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n", | 11011 | break; |
10914 | bpp, info->bpc * 3); | 11012 | case 10 ... 11: |
10915 | pipe_config->pipe_bpp = info->bpc * 3; | 11013 | bpp = 10 * 3; |
11014 | break; | ||
11015 | case 12: | ||
11016 | bpp = 12 * 3; | ||
11017 | break; | ||
11018 | default: | ||
11019 | return -EINVAL; | ||
10916 | } | 11020 | } |
10917 | 11021 | ||
10918 | /* Clamp bpp to 8 on screens without EDID 1.4 */ | 11022 | if (bpp < pipe_config->pipe_bpp) { |
10919 | if (info->bpc == 0 && bpp > 24) { | 11023 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of " |
10920 | DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", | 11024 | "EDID bpp %d, requested bpp %d, max platform bpp %d\n", |
10921 | bpp); | 11025 | connector->base.id, connector->name, |
10922 | pipe_config->pipe_bpp = 24; | 11026 | bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc, |
11027 | pipe_config->pipe_bpp); | ||
11028 | |||
11029 | pipe_config->pipe_bpp = bpp; | ||
10923 | } | 11030 | } |
11031 | |||
11032 | return 0; | ||
10924 | } | 11033 | } |
10925 | 11034 | ||
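The new compute_sink_pipe_bpp() above rounds the connector's max_bpc property down to the nearest bpc the hardware can emit (6, 8, 10 or 12), turns it into a pipe bpp (three components per pixel), and rejects anything else with -EINVAL. A minimal standalone sketch of just that clamping rule, with an invented demo harness around the switch from the diff:

    #include <errno.h>
    #include <stdio.h>

    /* Round max_bpc down to a supported bpc and return bpp (3 components). */
    static int max_bpc_to_pipe_bpp(int max_bpc)
    {
            switch (max_bpc) {
            case 6 ... 7:   /* gcc/clang case-range extension, as in the kernel */
                    return 6 * 3;
            case 8 ... 9:
                    return 8 * 3;
            case 10 ... 11:
                    return 10 * 3;
            case 12:
                    return 12 * 3;
            default:
                    return -EINVAL; /* out-of-range property values are rejected */
            }
    }

    int main(void)
    {
            for (int bpc = 5; bpc <= 13; bpc++)
                    printf("max_bpc=%d -> pipe_bpp=%d\n", bpc, max_bpc_to_pipe_bpp(bpc));
            return 0;
    }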
10926 | static int | 11035 | static int |
@@ -10928,7 +11037,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc, | |||
10928 | struct intel_crtc_state *pipe_config) | 11037 | struct intel_crtc_state *pipe_config) |
10929 | { | 11038 | { |
10930 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | 11039 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
10931 | struct drm_atomic_state *state; | 11040 | struct drm_atomic_state *state = pipe_config->base.state; |
10932 | struct drm_connector *connector; | 11041 | struct drm_connector *connector; |
10933 | struct drm_connector_state *connector_state; | 11042 | struct drm_connector_state *connector_state; |
10934 | int bpp, i; | 11043 | int bpp, i; |
@@ -10941,21 +11050,21 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc, | |||
10941 | else | 11050 | else |
10942 | bpp = 8*3; | 11051 | bpp = 8*3; |
10943 | 11052 | ||
10944 | |||
10945 | pipe_config->pipe_bpp = bpp; | 11053 | pipe_config->pipe_bpp = bpp; |
10946 | 11054 | ||
10947 | state = pipe_config->base.state; | 11055 | /* Clamp display bpp to connector max bpp */ |
10948 | |||
10949 | /* Clamp display bpp to EDID value */ | ||
10950 | for_each_new_connector_in_state(state, connector, connector_state, i) { | 11056 | for_each_new_connector_in_state(state, connector, connector_state, i) { |
11057 | int ret; | ||
11058 | |||
10951 | if (connector_state->crtc != &crtc->base) | 11059 | if (connector_state->crtc != &crtc->base) |
10952 | continue; | 11060 | continue; |
10953 | 11061 | ||
10954 | connected_sink_compute_bpp(to_intel_connector(connector), | 11062 | ret = compute_sink_pipe_bpp(connector_state, pipe_config); |
10955 | pipe_config); | 11063 | if (ret) |
11064 | return ret; | ||
10956 | } | 11065 | } |
10957 | 11066 | ||
10958 | return bpp; | 11067 | return 0; |
10959 | } | 11068 | } |
10960 | 11069 | ||
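compute_baseline_pipe_bpp() now follows the usual 0-or-negative-errno convention instead of returning the bpp; callers pick the result up from pipe_config->pipe_bpp. A hedged sketch of the clamp-per-connector loop it performs, with plain arrays standing in for the atomic connector-state iterator:

    #include <stdio.h>

    /* Start from the platform maximum and let every connected sink clamp it. */
    static int compute_baseline_bpp(int platform_max_bpp,
                                    const int *sink_bpp, int num_sinks,
                                    int *pipe_bpp)
    {
            *pipe_bpp = platform_max_bpp;

            for (int i = 0; i < num_sinks; i++) {
                    if (sink_bpp[i] < 0)
                            return sink_bpp[i];      /* propagate e.g. -EINVAL */
                    if (sink_bpp[i] < *pipe_bpp)
                            *pipe_bpp = sink_bpp[i]; /* weakest sink wins */
            }

            return 0;       /* success; the result lives in *pipe_bpp */
    }

    int main(void)
    {
            int sinks[] = { 10 * 3, 8 * 3 };
            int bpp;

            if (compute_baseline_bpp(12 * 3, sinks, 2, &bpp) == 0)
                    printf("pipe bpp clamped to %d\n", bpp);  /* 24 */
            return 0;
    }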
10961 | static void intel_dump_crtc_timings(const struct drm_display_mode *mode) | 11070 | static void intel_dump_crtc_timings(const struct drm_display_mode *mode) |
@@ -11025,6 +11134,20 @@ static void snprintf_output_types(char *buf, size_t len, | |||
11025 | WARN_ON_ONCE(output_types != 0); | 11134 | WARN_ON_ONCE(output_types != 0); |
11026 | } | 11135 | } |
11027 | 11136 | ||
11137 | static const char * const output_format_str[] = { | ||
11138 | [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid", | ||
11139 | [INTEL_OUTPUT_FORMAT_RGB] = "RGB", | ||
11140 | [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0", | ||
11141 | [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4", | ||
11142 | }; | ||
11143 | |||
11144 | static const char *output_formats(enum intel_output_format format) | ||
11145 | { | ||
11146 | if (format >= ARRAY_SIZE(output_format_str)) | ||
11147 | format = INTEL_OUTPUT_FORMAT_INVALID; | ||
11148 | return output_format_str[format]; | ||
11149 | } | ||
11150 | |||
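output_formats() above is the usual bounds-checked enum-to-string table: unknown values fall back to the INVALID slot instead of indexing past the array. The same pattern standalone (the enum here is a stand-in for intel_output_format):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    enum output_format { FORMAT_INVALID, FORMAT_RGB, FORMAT_YCBCR420, FORMAT_YCBCR444 };

    static const char * const output_format_str[] = {
            [FORMAT_INVALID]  = "Invalid",
            [FORMAT_RGB]      = "RGB",
            [FORMAT_YCBCR420] = "YCBCR4:2:0",
            [FORMAT_YCBCR444] = "YCBCR4:4:4",
    };

    static const char *output_formats(enum output_format format)
    {
            /* Map out-of-range values to "Invalid" rather than read past the table. */
            if ((unsigned int)format >= ARRAY_SIZE(output_format_str))
                    format = FORMAT_INVALID;
            return output_format_str[format];
    }

    int main(void)
    {
            printf("%s\n", output_formats(FORMAT_YCBCR420));         /* YCBCR4:2:0 */
            printf("%s\n", output_formats((enum output_format)99));  /* Invalid */
            return 0;
    }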
11028 | static void intel_dump_pipe_config(struct intel_crtc *crtc, | 11151 | static void intel_dump_pipe_config(struct intel_crtc *crtc, |
11029 | struct intel_crtc_state *pipe_config, | 11152 | struct intel_crtc_state *pipe_config, |
11030 | const char *context) | 11153 | const char *context) |
@@ -11044,6 +11167,9 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, | |||
11044 | DRM_DEBUG_KMS("output_types: %s (0x%x)\n", | 11167 | DRM_DEBUG_KMS("output_types: %s (0x%x)\n", |
11045 | buf, pipe_config->output_types); | 11168 | buf, pipe_config->output_types); |
11046 | 11169 | ||
11170 | DRM_DEBUG_KMS("output format: %s\n", | ||
11171 | output_formats(pipe_config->output_format)); | ||
11172 | |||
11047 | DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", | 11173 | DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", |
11048 | transcoder_name(pipe_config->cpu_transcoder), | 11174 | transcoder_name(pipe_config->cpu_transcoder), |
11049 | pipe_config->pipe_bpp, pipe_config->dither); | 11175 | pipe_config->pipe_bpp, pipe_config->dither); |
@@ -11053,9 +11179,6 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, | |||
11053 | pipe_config->fdi_lanes, | 11179 | pipe_config->fdi_lanes, |
11054 | &pipe_config->fdi_m_n); | 11180 | &pipe_config->fdi_m_n); |
11055 | 11181 | ||
11056 | if (pipe_config->ycbcr420) | ||
11057 | DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n"); | ||
11058 | |||
11059 | if (intel_crtc_has_dp_encoder(pipe_config)) { | 11182 | if (intel_crtc_has_dp_encoder(pipe_config)) { |
11060 | intel_dump_m_n_config(pipe_config, "dp m_n", | 11183 | intel_dump_m_n_config(pipe_config, "dp m_n", |
11061 | pipe_config->lane_count, &pipe_config->dp_m_n); | 11184 | pipe_config->lane_count, &pipe_config->dp_m_n); |
@@ -11244,7 +11367,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, | |||
11244 | struct intel_encoder *encoder; | 11367 | struct intel_encoder *encoder; |
11245 | struct drm_connector *connector; | 11368 | struct drm_connector *connector; |
11246 | struct drm_connector_state *connector_state; | 11369 | struct drm_connector_state *connector_state; |
11247 | int base_bpp, ret = -EINVAL; | 11370 | int base_bpp, ret; |
11248 | int i; | 11371 | int i; |
11249 | bool retry = true; | 11372 | bool retry = true; |
11250 | 11373 | ||
@@ -11266,10 +11389,12 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, | |||
11266 | (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) | 11389 | (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) |
11267 | pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; | 11390 | pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; |
11268 | 11391 | ||
11269 | base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc), | 11392 | ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc), |
11270 | pipe_config); | 11393 | pipe_config); |
11271 | if (base_bpp < 0) | 11394 | if (ret) |
11272 | goto fail; | 11395 | return ret; |
11396 | |||
11397 | base_bpp = pipe_config->pipe_bpp; | ||
11273 | 11398 | ||
11274 | /* | 11399 | /* |
11275 | * Determine the real pipe dimensions. Note that stereo modes can | 11400 | * Determine the real pipe dimensions. Note that stereo modes can |
@@ -11291,7 +11416,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, | |||
11291 | 11416 | ||
11292 | if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) { | 11417 | if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) { |
11293 | DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); | 11418 | DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); |
11294 | goto fail; | 11419 | return -EINVAL; |
11295 | } | 11420 | } |
11296 | 11421 | ||
11297 | /* | 11422 | /* |
@@ -11327,7 +11452,7 @@ encoder_retry: | |||
11327 | 11452 | ||
11328 | if (!(encoder->compute_config(encoder, pipe_config, connector_state))) { | 11453 | if (!(encoder->compute_config(encoder, pipe_config, connector_state))) { |
11329 | DRM_DEBUG_KMS("Encoder config failure\n"); | 11454 | DRM_DEBUG_KMS("Encoder config failure\n"); |
11330 | goto fail; | 11455 | return -EINVAL; |
11331 | } | 11456 | } |
11332 | } | 11457 | } |
11333 | 11458 | ||
@@ -11338,16 +11463,16 @@ encoder_retry: | |||
11338 | * pipe_config->pixel_multiplier; | 11463 | * pipe_config->pixel_multiplier; |
11339 | 11464 | ||
11340 | ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); | 11465 | ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); |
11466 | if (ret == -EDEADLK) | ||
11467 | return ret; | ||
11341 | if (ret < 0) { | 11468 | if (ret < 0) { |
11342 | DRM_DEBUG_KMS("CRTC fixup failed\n"); | 11469 | DRM_DEBUG_KMS("CRTC fixup failed\n"); |
11343 | goto fail; | 11470 | return ret; |
11344 | } | 11471 | } |
11345 | 11472 | ||
11346 | if (ret == RETRY) { | 11473 | if (ret == RETRY) { |
11347 | if (WARN(!retry, "loop in pipe configuration computation\n")) { | 11474 | if (WARN(!retry, "loop in pipe configuration computation\n")) |
11348 | ret = -EINVAL; | 11475 | return -EINVAL; |
11349 | goto fail; | ||
11350 | } | ||
11351 | 11476 | ||
11352 | DRM_DEBUG_KMS("CRTC bw constrained, retrying\n"); | 11477 | DRM_DEBUG_KMS("CRTC bw constrained, retrying\n"); |
11353 | retry = false; | 11478 | retry = false; |
@@ -11363,8 +11488,7 @@ encoder_retry: | |||
11363 | DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n", | 11488 | DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n", |
11364 | base_bpp, pipe_config->pipe_bpp, pipe_config->dither); | 11489 | base_bpp, pipe_config->pipe_bpp, pipe_config->dither); |
11365 | 11490 | ||
11366 | fail: | 11491 | return 0; |
11367 | return ret; | ||
11368 | } | 11492 | } |
11369 | 11493 | ||
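The switch from the single fail: label to direct returns in intel_modeset_pipe_config() matters mostly for -EDEADLK: modeset-lock contention has to reach the atomic core untouched so it can drop all locks and retry the transaction, while any other negative value is a real configuration error that may be logged. A sketch of that error routing under invented helper names:

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for a compute step that may hit modeset-lock contention. */
    static int compute_step(int simulated_result)
    {
            return simulated_result;
    }

    static int pipe_config_check(int simulated_result)
    {
            int ret = compute_step(simulated_result);

            if (ret == -EDEADLK)
                    return ret;     /* back off quietly; the core retries */
            if (ret < 0) {
                    fprintf(stderr, "CRTC fixup failed\n"); /* real error */
                    return ret;
            }
            return 0;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   pipe_config_check(0),
                   pipe_config_check(-EDEADLK),
                   pipe_config_check(-EINVAL));
            return 0;
    }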
11370 | static bool intel_fuzzy_clock_check(int clock1, int clock2) | 11494 | static bool intel_fuzzy_clock_check(int clock1, int clock2) |
@@ -11633,6 +11757,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, | |||
11633 | PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end); | 11757 | PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end); |
11634 | 11758 | ||
11635 | PIPE_CONF_CHECK_I(pixel_multiplier); | 11759 | PIPE_CONF_CHECK_I(pixel_multiplier); |
11760 | PIPE_CONF_CHECK_I(output_format); | ||
11636 | PIPE_CONF_CHECK_BOOL(has_hdmi_sink); | 11761 | PIPE_CONF_CHECK_BOOL(has_hdmi_sink); |
11637 | if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || | 11762 | if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || |
11638 | IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | 11763 | IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
@@ -11641,7 +11766,6 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, | |||
11641 | PIPE_CONF_CHECK_BOOL(hdmi_scrambling); | 11766 | PIPE_CONF_CHECK_BOOL(hdmi_scrambling); |
11642 | PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); | 11767 | PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); |
11643 | PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe); | 11768 | PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe); |
11644 | PIPE_CONF_CHECK_BOOL(ycbcr420); | ||
11645 | 11769 | ||
11646 | PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); | 11770 | PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); |
11647 | 11771 | ||
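The ycbcr420 flag is replaced by the output_format enum, which the state checker compares with PIPE_CONF_CHECK_I like any other integer field. The compare-and-complain macro style used by intel_pipe_config_compare(), in standalone form with illustrative struct fields:

    #include <stdbool.h>
    #include <stdio.h>

    struct pipe_config { int pipe_bpp; int output_format; };

    /* Compare one integer field of two states and report a mismatch. */
    #define PIPE_CONF_CHECK_I(name) do {                            \
            if (hw->name != sw->name) {                             \
                    printf("mismatch in " #name ": hw %d, sw %d\n", \
                           hw->name, sw->name);                     \
                    ok = false;                                     \
            }                                                       \
    } while (0)

    static bool pipe_config_compare(const struct pipe_config *hw,
                                    const struct pipe_config *sw)
    {
            bool ok = true;

            PIPE_CONF_CHECK_I(pipe_bpp);
            PIPE_CONF_CHECK_I(output_format);

            return ok;
    }

    int main(void)
    {
            struct pipe_config hw = { 24, 1 }, sw = { 24, 2 };

            printf("states match: %d\n", pipe_config_compare(&hw, &sw));
            return 0;
    }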
@@ -12150,8 +12274,9 @@ intel_modeset_verify_disabled(struct drm_device *dev, | |||
12150 | verify_disabled_dpll_state(dev); | 12274 | verify_disabled_dpll_state(dev); |
12151 | } | 12275 | } |
12152 | 12276 | ||
12153 | static void update_scanline_offset(struct intel_crtc *crtc) | 12277 | static void update_scanline_offset(const struct intel_crtc_state *crtc_state) |
12154 | { | 12278 | { |
12279 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); | ||
12155 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | 12280 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
12156 | 12281 | ||
12157 | /* | 12282 | /* |
@@ -12182,7 +12307,7 @@ static void update_scanline_offset(struct intel_crtc *crtc) | |||
12182 | * answer that's slightly in the future. | 12307 | * answer that's slightly in the future. |
12183 | */ | 12308 | */ |
12184 | if (IS_GEN2(dev_priv)) { | 12309 | if (IS_GEN2(dev_priv)) { |
12185 | const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; | 12310 | const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; |
12186 | int vtotal; | 12311 | int vtotal; |
12187 | 12312 | ||
12188 | vtotal = adjusted_mode->crtc_vtotal; | 12313 | vtotal = adjusted_mode->crtc_vtotal; |
@@ -12191,7 +12316,7 @@ static void update_scanline_offset(struct intel_crtc *crtc) | |||
12191 | 12316 | ||
12192 | crtc->scanline_offset = vtotal - 1; | 12317 | crtc->scanline_offset = vtotal - 1; |
12193 | } else if (HAS_DDI(dev_priv) && | 12318 | } else if (HAS_DDI(dev_priv) && |
12194 | intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) { | 12319 | intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { |
12195 | crtc->scanline_offset = 2; | 12320 | crtc->scanline_offset = 2; |
12196 | } else | 12321 | } else |
12197 | crtc->scanline_offset = 1; | 12322 | crtc->scanline_offset = 1; |
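update_scanline_offset() now takes the crtc_state instead of digging through crtc->config, but the offsets themselves are unchanged: gen2 hardware reads back one line early modulo vtotal (hence vtotal - 1), HDMI on DDI platforms is two lines early, and everything else is off by one. The selection, sketched standalone with illustrative flags:

    #include <stdbool.h>
    #include <stdio.h>

    static int scanline_offset(bool is_gen2, bool is_ddi_hdmi, int crtc_vtotal)
    {
            if (is_gen2)
                    return crtc_vtotal - 1; /* hw reports (scanline - 1) % vtotal */
            if (is_ddi_hdmi)
                    return 2;               /* DDI HDMI reads back two lines early */
            return 1;                       /* everyone else is off by one */
    }

    int main(void)
    {
            printf("gen2: %d, ddi hdmi: %d, other: %d\n",
                   scanline_offset(true, false, 525),
                   scanline_offset(false, true, 525),
                   scanline_offset(false, false, 525));
            return 0;
    }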
@@ -12474,6 +12599,8 @@ static int intel_atomic_check(struct drm_device *dev, | |||
12474 | } | 12599 | } |
12475 | 12600 | ||
12476 | ret = intel_modeset_pipe_config(crtc, pipe_config); | 12601 | ret = intel_modeset_pipe_config(crtc, pipe_config); |
12602 | if (ret == -EDEADLK) | ||
12603 | return ret; | ||
12477 | if (ret) { | 12604 | if (ret) { |
12478 | intel_dump_pipe_config(to_intel_crtc(crtc), | 12605 | intel_dump_pipe_config(to_intel_crtc(crtc), |
12479 | pipe_config, "[failed]"); | 12606 | pipe_config, "[failed]"); |
@@ -12505,6 +12632,10 @@ static int intel_atomic_check(struct drm_device *dev, | |||
12505 | intel_state->cdclk.logical = dev_priv->cdclk.logical; | 12632 | intel_state->cdclk.logical = dev_priv->cdclk.logical; |
12506 | } | 12633 | } |
12507 | 12634 | ||
12635 | ret = icl_add_linked_planes(intel_state); | ||
12636 | if (ret) | ||
12637 | return ret; | ||
12638 | |||
12508 | ret = drm_atomic_helper_check_planes(dev, state); | 12639 | ret = drm_atomic_helper_check_planes(dev, state); |
12509 | if (ret) | 12640 | if (ret) |
12510 | return ret; | 12641 | return ret; |
@@ -12537,6 +12668,7 @@ static void intel_update_crtc(struct drm_crtc *crtc, | |||
12537 | struct drm_device *dev = crtc->dev; | 12668 | struct drm_device *dev = crtc->dev; |
12538 | struct drm_i915_private *dev_priv = to_i915(dev); | 12669 | struct drm_i915_private *dev_priv = to_i915(dev); |
12539 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 12670 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
12671 | struct intel_crtc_state *old_intel_cstate = to_intel_crtc_state(old_crtc_state); | ||
12540 | struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state); | 12672 | struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state); |
12541 | bool modeset = needs_modeset(new_crtc_state); | 12673 | bool modeset = needs_modeset(new_crtc_state); |
12542 | struct intel_plane_state *new_plane_state = | 12674 | struct intel_plane_state *new_plane_state = |
@@ -12544,7 +12676,7 @@ static void intel_update_crtc(struct drm_crtc *crtc, | |||
12544 | to_intel_plane(crtc->primary)); | 12676 | to_intel_plane(crtc->primary)); |
12545 | 12677 | ||
12546 | if (modeset) { | 12678 | if (modeset) { |
12547 | update_scanline_offset(intel_crtc); | 12679 | update_scanline_offset(pipe_config); |
12548 | dev_priv->display.crtc_enable(pipe_config, state); | 12680 | dev_priv->display.crtc_enable(pipe_config, state); |
12549 | 12681 | ||
12550 | /* vblanks work again, re-enable pipe CRC. */ | 12682 | /* vblanks work again, re-enable pipe CRC. */ |
@@ -12557,7 +12689,12 @@ static void intel_update_crtc(struct drm_crtc *crtc, | |||
12557 | if (new_plane_state) | 12689 | if (new_plane_state) |
12558 | intel_fbc_enable(intel_crtc, pipe_config, new_plane_state); | 12690 | intel_fbc_enable(intel_crtc, pipe_config, new_plane_state); |
12559 | 12691 | ||
12560 | drm_atomic_helper_commit_planes_on_crtc(old_crtc_state); | 12692 | intel_begin_crtc_commit(crtc, old_crtc_state); |
12693 | |||
12694 | intel_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc, | ||
12695 | old_intel_cstate, pipe_config); | ||
12696 | |||
12697 | intel_finish_crtc_commit(crtc, old_crtc_state); | ||
12561 | } | 12698 | } |
12562 | 12699 | ||
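With .atomic_begin/.atomic_flush dropped from intel_helper_funcs earlier in this diff, intel_update_crtc() no longer goes through drm_atomic_helper_commit_planes_on_crtc() and instead open-codes the same three phases so the plane walk can use intel_update_planes_on_crtc() directly. The shape of that sequence, sketched with placeholder types:

    #include <stdio.h>

    struct crtc { const char *name; };

    /* The three phases the DRM helper used to sequence on our behalf. */
    static void begin_crtc_commit(struct crtc *c)  { printf("%s: begin (vblank evasion)\n", c->name); }
    static void update_planes(struct crtc *c)      { printf("%s: write plane registers\n", c->name); }
    static void finish_crtc_commit(struct crtc *c) { printf("%s: finish (arm the update)\n", c->name); }

    static void update_crtc(struct crtc *c)
    {
            begin_crtc_commit(c);   /* was .atomic_begin */
            update_planes(c);       /* driver-private plane walk */
            finish_crtc_commit(c);  /* was .atomic_flush */
    }

    int main(void)
    {
            struct crtc c = { "pipe A" };

            update_crtc(&c);
            return 0;
    }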
12563 | static void intel_update_crtcs(struct drm_atomic_state *state) | 12700 | static void intel_update_crtcs(struct drm_atomic_state *state) |
@@ -12589,13 +12726,12 @@ static void skl_update_crtcs(struct drm_atomic_state *state) | |||
12589 | int i; | 12726 | int i; |
12590 | u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; | 12727 | u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; |
12591 | u8 required_slices = intel_state->wm_results.ddb.enabled_slices; | 12728 | u8 required_slices = intel_state->wm_results.ddb.enabled_slices; |
12592 | 12729 | struct skl_ddb_entry entries[I915_MAX_PIPES] = {}; | |
12593 | const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {}; | ||
12594 | 12730 | ||
12595 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) | 12731 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) |
12596 | /* ignore allocations for crtcs that have been turned off. */ | 12732 | /* ignore allocations for crtcs that have been turned off. */ |
12597 | if (new_crtc_state->active) | 12733 | if (new_crtc_state->active) |
12598 | entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb; | 12734 | entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb; |
12599 | 12735 | ||
12600 | /* If 2nd DBuf slice required, enable it here */ | 12736 | /* If 2nd DBuf slice required, enable it here */ |
12601 | if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices) | 12737 | if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices) |
@@ -12621,14 +12757,13 @@ static void skl_update_crtcs(struct drm_atomic_state *state) | |||
12621 | if (updated & cmask || !cstate->base.active) | 12757 | if (updated & cmask || !cstate->base.active) |
12622 | continue; | 12758 | continue; |
12623 | 12759 | ||
12624 | if (skl_ddb_allocation_overlaps(dev_priv, | 12760 | if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb, |
12625 | entries, | 12761 | entries, |
12626 | &cstate->wm.skl.ddb, | 12762 | INTEL_INFO(dev_priv)->num_pipes, i)) |
12627 | i)) | ||
12628 | continue; | 12763 | continue; |
12629 | 12764 | ||
12630 | updated |= cmask; | 12765 | updated |= cmask; |
12631 | entries[i] = &cstate->wm.skl.ddb; | 12766 | entries[i] = cstate->wm.skl.ddb; |
12632 | 12767 | ||
12633 | /* | 12768 | /* |
12634 | * If this is an already active pipe, its DDB changed, | 12769 ||
@@ -12718,8 +12853,9 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) | |||
12718 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); | 12853 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); |
12719 | struct drm_i915_private *dev_priv = to_i915(dev); | 12854 | struct drm_i915_private *dev_priv = to_i915(dev); |
12720 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; | 12855 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; |
12856 | struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state; | ||
12721 | struct drm_crtc *crtc; | 12857 | struct drm_crtc *crtc; |
12722 | struct intel_crtc_state *intel_cstate; | 12858 | struct intel_crtc *intel_crtc; |
12723 | u64 put_domains[I915_MAX_PIPES] = {}; | 12859 | u64 put_domains[I915_MAX_PIPES] = {}; |
12724 | int i; | 12860 | int i; |
12725 | 12861 | ||
@@ -12731,24 +12867,25 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) | |||
12731 | intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); | 12867 | intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); |
12732 | 12868 | ||
12733 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { | 12869 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
12734 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 12870 | old_intel_crtc_state = to_intel_crtc_state(old_crtc_state); |
12871 | new_intel_crtc_state = to_intel_crtc_state(new_crtc_state); | ||
12872 | intel_crtc = to_intel_crtc(crtc); | ||
12735 | 12873 | ||
12736 | if (needs_modeset(new_crtc_state) || | 12874 | if (needs_modeset(new_crtc_state) || |
12737 | to_intel_crtc_state(new_crtc_state)->update_pipe) { | 12875 | to_intel_crtc_state(new_crtc_state)->update_pipe) { |
12738 | 12876 | ||
12739 | put_domains[to_intel_crtc(crtc)->pipe] = | 12877 | put_domains[intel_crtc->pipe] = |
12740 | modeset_get_crtc_power_domains(crtc, | 12878 | modeset_get_crtc_power_domains(crtc, |
12741 | to_intel_crtc_state(new_crtc_state)); | 12879 | new_intel_crtc_state); |
12742 | } | 12880 | } |
12743 | 12881 | ||
12744 | if (!needs_modeset(new_crtc_state)) | 12882 | if (!needs_modeset(new_crtc_state)) |
12745 | continue; | 12883 | continue; |
12746 | 12884 | ||
12747 | intel_pre_plane_update(to_intel_crtc_state(old_crtc_state), | 12885 | intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state); |
12748 | to_intel_crtc_state(new_crtc_state)); | ||
12749 | 12886 | ||
12750 | if (old_crtc_state->active) { | 12887 | if (old_crtc_state->active) { |
12751 | intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask); | 12888 | intel_crtc_disable_planes(intel_crtc, old_intel_crtc_state->active_planes); |
12752 | 12889 | ||
12753 | /* | 12890 | /* |
12754 | * We need to disable pipe CRC before disabling the pipe, | 12891 | * We need to disable pipe CRC before disabling the pipe, |
@@ -12756,10 +12893,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) | |||
12756 | */ | 12893 | */ |
12757 | intel_crtc_disable_pipe_crc(intel_crtc); | 12894 | intel_crtc_disable_pipe_crc(intel_crtc); |
12758 | 12895 | ||
12759 | dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state); | 12896 | dev_priv->display.crtc_disable(old_intel_crtc_state, state); |
12760 | intel_crtc->active = false; | 12897 | intel_crtc->active = false; |
12761 | intel_fbc_disable(intel_crtc); | 12898 | intel_fbc_disable(intel_crtc); |
12762 | intel_disable_shared_dpll(intel_crtc); | 12899 | intel_disable_shared_dpll(old_intel_crtc_state); |
12763 | 12900 | ||
12764 | /* | 12901 | /* |
12765 | * Underruns don't always raise | 12902 | * Underruns don't always raise |
@@ -12768,17 +12905,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) | |||
12768 | intel_check_cpu_fifo_underruns(dev_priv); | 12905 | intel_check_cpu_fifo_underruns(dev_priv); |
12769 | intel_check_pch_fifo_underruns(dev_priv); | 12906 | intel_check_pch_fifo_underruns(dev_priv); |
12770 | 12907 | ||
12771 | if (!new_crtc_state->active) { | 12908 | /* FIXME unify this for all platforms */ |
12772 | /* | 12909 | if (!new_crtc_state->active && |
12773 | * Make sure we don't call initial_watermarks | 12910 | !HAS_GMCH_DISPLAY(dev_priv) && |
12774 | * for ILK-style watermark updates. | 12911 | dev_priv->display.initial_watermarks) |
12775 | * | 12912 | dev_priv->display.initial_watermarks(intel_state, |
12776 | * No clue what this is supposed to achieve. | 12913 | new_intel_crtc_state); |
12777 | */ | ||
12778 | if (INTEL_GEN(dev_priv) >= 9) | ||
12779 | dev_priv->display.initial_watermarks(intel_state, | ||
12780 | to_intel_crtc_state(new_crtc_state)); | ||
12781 | } | ||
12782 | } | 12914 | } |
12783 | } | 12915 | } |
12784 | 12916 | ||
@@ -12837,11 +12969,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) | |||
12837 | * TODO: Move this (and other cleanup) to an async worker eventually. | 12969 | * TODO: Move this (and other cleanup) to an async worker eventually. |
12838 | */ | 12970 | */ |
12839 | for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { | 12971 | for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { |
12840 | intel_cstate = to_intel_crtc_state(new_crtc_state); | 12972 | new_intel_crtc_state = to_intel_crtc_state(new_crtc_state); |
12841 | 12973 | ||
12842 | if (dev_priv->display.optimize_watermarks) | 12974 | if (dev_priv->display.optimize_watermarks) |
12843 | dev_priv->display.optimize_watermarks(intel_state, | 12975 | dev_priv->display.optimize_watermarks(intel_state, |
12844 | intel_cstate); | 12976 | new_intel_crtc_state); |
12845 | } | 12977 | } |
12846 | 12978 | ||
12847 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { | 12979 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
@@ -13224,13 +13356,12 @@ intel_prepare_plane_fb(struct drm_plane *plane, | |||
13224 | 13356 | ||
13225 | ret = intel_plane_pin_fb(to_intel_plane_state(new_state)); | 13357 | ret = intel_plane_pin_fb(to_intel_plane_state(new_state)); |
13226 | 13358 | ||
13227 | fb_obj_bump_render_priority(obj); | ||
13228 | |||
13229 | mutex_unlock(&dev_priv->drm.struct_mutex); | 13359 | mutex_unlock(&dev_priv->drm.struct_mutex); |
13230 | i915_gem_object_unpin_pages(obj); | 13360 | i915_gem_object_unpin_pages(obj); |
13231 | if (ret) | 13361 | if (ret) |
13232 | return ret; | 13362 | return ret; |
13233 | 13363 | ||
13364 | fb_obj_bump_render_priority(obj); | ||
13234 | intel_fb_obj_flush(obj, ORIGIN_DIRTYFB); | 13365 | intel_fb_obj_flush(obj, ORIGIN_DIRTYFB); |
13235 | 13366 | ||
13236 | if (!new_state->fence) { /* implicit fencing */ | 13367 | if (!new_state->fence) { /* implicit fencing */ |
@@ -13361,7 +13492,7 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc, | |||
13361 | if (intel_cstate->update_pipe) | 13492 | if (intel_cstate->update_pipe) |
13362 | intel_update_pipe_config(old_intel_cstate, intel_cstate); | 13493 | intel_update_pipe_config(old_intel_cstate, intel_cstate); |
13363 | else if (INTEL_GEN(dev_priv) >= 9) | 13494 | else if (INTEL_GEN(dev_priv) >= 9) |
13364 | skl_detach_scalers(intel_crtc); | 13495 | skl_detach_scalers(intel_cstate); |
13365 | 13496 | ||
13366 | out: | 13497 | out: |
13367 | if (dev_priv->display.atomic_update_watermarks) | 13498 | if (dev_priv->display.atomic_update_watermarks) |
@@ -13463,56 +13594,6 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane, | |||
13463 | } | 13594 | } |
13464 | } | 13595 | } |
13465 | 13596 | ||
13466 | static bool skl_plane_format_mod_supported(struct drm_plane *_plane, | ||
13467 | u32 format, u64 modifier) | ||
13468 | { | ||
13469 | struct intel_plane *plane = to_intel_plane(_plane); | ||
13470 | |||
13471 | switch (modifier) { | ||
13472 | case DRM_FORMAT_MOD_LINEAR: | ||
13473 | case I915_FORMAT_MOD_X_TILED: | ||
13474 | case I915_FORMAT_MOD_Y_TILED: | ||
13475 | case I915_FORMAT_MOD_Yf_TILED: | ||
13476 | break; | ||
13477 | case I915_FORMAT_MOD_Y_TILED_CCS: | ||
13478 | case I915_FORMAT_MOD_Yf_TILED_CCS: | ||
13479 | if (!plane->has_ccs) | ||
13480 | return false; | ||
13481 | break; | ||
13482 | default: | ||
13483 | return false; | ||
13484 | } | ||
13485 | |||
13486 | switch (format) { | ||
13487 | case DRM_FORMAT_XRGB8888: | ||
13488 | case DRM_FORMAT_XBGR8888: | ||
13489 | case DRM_FORMAT_ARGB8888: | ||
13490 | case DRM_FORMAT_ABGR8888: | ||
13491 | if (is_ccs_modifier(modifier)) | ||
13492 | return true; | ||
13493 | /* fall through */ | ||
13494 | case DRM_FORMAT_RGB565: | ||
13495 | case DRM_FORMAT_XRGB2101010: | ||
13496 | case DRM_FORMAT_XBGR2101010: | ||
13497 | case DRM_FORMAT_YUYV: | ||
13498 | case DRM_FORMAT_YVYU: | ||
13499 | case DRM_FORMAT_UYVY: | ||
13500 | case DRM_FORMAT_VYUY: | ||
13501 | case DRM_FORMAT_NV12: | ||
13502 | if (modifier == I915_FORMAT_MOD_Yf_TILED) | ||
13503 | return true; | ||
13504 | /* fall through */ | ||
13505 | case DRM_FORMAT_C8: | ||
13506 | if (modifier == DRM_FORMAT_MOD_LINEAR || | ||
13507 | modifier == I915_FORMAT_MOD_X_TILED || | ||
13508 | modifier == I915_FORMAT_MOD_Y_TILED) | ||
13509 | return true; | ||
13510 | /* fall through */ | ||
13511 | default: | ||
13512 | return false; | ||
13513 | } | ||
13514 | } | ||
13515 | |||
13516 | static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, | 13597 | static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, |
13517 | u32 format, u64 modifier) | 13598 | u32 format, u64 modifier) |
13518 | { | 13599 | { |
@@ -13520,18 +13601,7 @@ static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, | |||
13520 | format == DRM_FORMAT_ARGB8888; | 13601 | format == DRM_FORMAT_ARGB8888; |
13521 | } | 13602 | } |
13522 | 13603 | ||
13523 | static struct drm_plane_funcs skl_plane_funcs = { | 13604 | static const struct drm_plane_funcs i965_plane_funcs = { |
13524 | .update_plane = drm_atomic_helper_update_plane, | ||
13525 | .disable_plane = drm_atomic_helper_disable_plane, | ||
13526 | .destroy = intel_plane_destroy, | ||
13527 | .atomic_get_property = intel_plane_atomic_get_property, | ||
13528 | .atomic_set_property = intel_plane_atomic_set_property, | ||
13529 | .atomic_duplicate_state = intel_plane_duplicate_state, | ||
13530 | .atomic_destroy_state = intel_plane_destroy_state, | ||
13531 | .format_mod_supported = skl_plane_format_mod_supported, | ||
13532 | }; | ||
13533 | |||
13534 | static struct drm_plane_funcs i965_plane_funcs = { | ||
13535 | .update_plane = drm_atomic_helper_update_plane, | 13605 | .update_plane = drm_atomic_helper_update_plane, |
13536 | .disable_plane = drm_atomic_helper_disable_plane, | 13606 | .disable_plane = drm_atomic_helper_disable_plane, |
13537 | .destroy = intel_plane_destroy, | 13607 | .destroy = intel_plane_destroy, |
@@ -13542,7 +13612,7 @@ static struct drm_plane_funcs i965_plane_funcs = { | |||
13542 | .format_mod_supported = i965_plane_format_mod_supported, | 13612 | .format_mod_supported = i965_plane_format_mod_supported, |
13543 | }; | 13613 | }; |
13544 | 13614 | ||
13545 | static struct drm_plane_funcs i8xx_plane_funcs = { | 13615 | static const struct drm_plane_funcs i8xx_plane_funcs = { |
13546 | .update_plane = drm_atomic_helper_update_plane, | 13616 | .update_plane = drm_atomic_helper_update_plane, |
13547 | .disable_plane = drm_atomic_helper_disable_plane, | 13617 | .disable_plane = drm_atomic_helper_disable_plane, |
13548 | .destroy = intel_plane_destroy, | 13618 | .destroy = intel_plane_destroy, |
@@ -13568,14 +13638,16 @@ intel_legacy_cursor_update(struct drm_plane *plane, | |||
13568 | struct drm_plane_state *old_plane_state, *new_plane_state; | 13638 | struct drm_plane_state *old_plane_state, *new_plane_state; |
13569 | struct intel_plane *intel_plane = to_intel_plane(plane); | 13639 | struct intel_plane *intel_plane = to_intel_plane(plane); |
13570 | struct drm_framebuffer *old_fb; | 13640 | struct drm_framebuffer *old_fb; |
13571 | struct drm_crtc_state *crtc_state = crtc->state; | 13641 | struct intel_crtc_state *crtc_state = |
13642 | to_intel_crtc_state(crtc->state); | ||
13643 | struct intel_crtc_state *new_crtc_state; | ||
13572 | 13644 | ||
13573 | /* | 13645 | /* |
13574 | * When crtc is inactive or there is a modeset pending, | 13646 | * When crtc is inactive or there is a modeset pending, |
13575 | * wait for it to complete in the slowpath | 13647 | * wait for it to complete in the slowpath |
13576 | */ | 13648 | */ |
13577 | if (!crtc_state->active || needs_modeset(crtc_state) || | 13649 | if (!crtc_state->base.active || needs_modeset(&crtc_state->base) || |
13578 | to_intel_crtc_state(crtc_state)->update_pipe) | 13650 | crtc_state->update_pipe) |
13579 | goto slow; | 13651 | goto slow; |
13580 | 13652 | ||
13581 | old_plane_state = plane->state; | 13653 | old_plane_state = plane->state; |
@@ -13605,6 +13677,12 @@ intel_legacy_cursor_update(struct drm_plane *plane, | |||
13605 | if (!new_plane_state) | 13677 | if (!new_plane_state) |
13606 | return -ENOMEM; | 13678 | return -ENOMEM; |
13607 | 13679 | ||
13680 | new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc)); | ||
13681 | if (!new_crtc_state) { | ||
13682 | ret = -ENOMEM; | ||
13683 | goto out_free; | ||
13684 | } | ||
13685 | |||
13608 | drm_atomic_set_fb_for_plane(new_plane_state, fb); | 13686 | drm_atomic_set_fb_for_plane(new_plane_state, fb); |
13609 | 13687 | ||
13610 | new_plane_state->src_x = src_x; | 13688 | new_plane_state->src_x = src_x; |
@@ -13616,9 +13694,8 @@ intel_legacy_cursor_update(struct drm_plane *plane, | |||
13616 | new_plane_state->crtc_w = crtc_w; | 13694 | new_plane_state->crtc_w = crtc_w; |
13617 | new_plane_state->crtc_h = crtc_h; | 13695 | new_plane_state->crtc_h = crtc_h; |
13618 | 13696 | ||
13619 | ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state), | 13697 | ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state, |
13620 | to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */ | 13698 | to_intel_plane_state(old_plane_state), |
13621 | to_intel_plane_state(plane->state), | ||
13622 | to_intel_plane_state(new_plane_state)); | 13699 | to_intel_plane_state(new_plane_state)); |
13623 | if (ret) | 13700 | if (ret) |
13624 | goto out_free; | 13701 | goto out_free; |
@@ -13640,10 +13717,21 @@ intel_legacy_cursor_update(struct drm_plane *plane, | |||
13640 | /* Swap plane state */ | 13717 | /* Swap plane state */ |
13641 | plane->state = new_plane_state; | 13718 | plane->state = new_plane_state; |
13642 | 13719 | ||
13720 | /* | ||
13721 | * We cannot swap crtc_state as it may be in use by an atomic commit or | ||
13722 | * page flip that's running simultaneously. If we swap crtc_state and | ||
13723 | * destroy the old state, we will cause a use-after-free there. | ||
13724 | * | ||
13725 | * Only update active_planes, which is needed for our internal | ||
13726 | * bookkeeping. Either value will do the right thing when updating | ||
13727 | * planes atomically. If the cursor was part of the atomic update then | ||
13728 | * we would have taken the slowpath. | ||
13729 | */ | ||
13730 | crtc_state->active_planes = new_crtc_state->active_planes; | ||
13731 | |||
13643 | if (plane->state->visible) { | 13732 | if (plane->state->visible) { |
13644 | trace_intel_update_plane(plane, to_intel_crtc(crtc)); | 13733 | trace_intel_update_plane(plane, to_intel_crtc(crtc)); |
13645 | intel_plane->update_plane(intel_plane, | 13734 | intel_plane->update_plane(intel_plane, crtc_state, |
13646 | to_intel_crtc_state(crtc->state), | ||
13647 | to_intel_plane_state(plane->state)); | 13735 | to_intel_plane_state(plane->state)); |
13648 | } else { | 13736 | } else { |
13649 | trace_intel_disable_plane(plane, to_intel_crtc(crtc)); | 13737 | trace_intel_disable_plane(plane, to_intel_crtc(crtc)); |
@@ -13655,6 +13743,8 @@ intel_legacy_cursor_update(struct drm_plane *plane, | |||
13655 | out_unlock: | 13743 | out_unlock: |
13656 | mutex_unlock(&dev_priv->drm.struct_mutex); | 13744 | mutex_unlock(&dev_priv->drm.struct_mutex); |
13657 | out_free: | 13745 | out_free: |
13746 | if (new_crtc_state) | ||
13747 | intel_crtc_destroy_state(crtc, &new_crtc_state->base); | ||
13658 | if (ret) | 13748 | if (ret) |
13659 | intel_plane_destroy_state(plane, new_plane_state); | 13749 | intel_plane_destroy_state(plane, new_plane_state); |
13660 | else | 13750 | else |
@@ -13695,176 +13785,90 @@ static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv, | |||
13695 | return i9xx_plane == PLANE_A; | 13785 | return i9xx_plane == PLANE_A; |
13696 | } | 13786 | } |
13697 | 13787 | ||
13698 | static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, | ||
13699 | enum pipe pipe, enum plane_id plane_id) | ||
13700 | { | ||
13701 | if (!HAS_FBC(dev_priv)) | ||
13702 | return false; | ||
13703 | |||
13704 | return pipe == PIPE_A && plane_id == PLANE_PRIMARY; | ||
13705 | } | ||
13706 | |||
13707 | bool skl_plane_has_planar(struct drm_i915_private *dev_priv, | ||
13708 | enum pipe pipe, enum plane_id plane_id) | ||
13709 | { | ||
13710 | /* | ||
13711 | * FIXME: ICL requires two hardware planes for scanning out NV12 | ||
13712 | * framebuffers. Do not advertize support until this is implemented. | ||
13713 | */ | ||
13714 | if (INTEL_GEN(dev_priv) >= 11) | ||
13715 | return false; | ||
13716 | |||
13717 | if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) | ||
13718 | return false; | ||
13719 | |||
13720 | if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C) | ||
13721 | return false; | ||
13722 | |||
13723 | if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0) | ||
13724 | return false; | ||
13725 | |||
13726 | return true; | ||
13727 | } | ||
13728 | |||
13729 | static struct intel_plane * | 13788 | static struct intel_plane * |
13730 | intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) | 13789 | intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) |
13731 | { | 13790 | { |
13732 | struct intel_plane *primary = NULL; | 13791 | struct intel_plane *plane; |
13733 | struct intel_plane_state *state = NULL; | ||
13734 | const struct drm_plane_funcs *plane_funcs; | 13792 | const struct drm_plane_funcs *plane_funcs; |
13735 | const uint32_t *intel_primary_formats; | ||
13736 | unsigned int supported_rotations; | 13793 | unsigned int supported_rotations; |
13737 | unsigned int num_formats; | 13794 | unsigned int possible_crtcs; |
13738 | const uint64_t *modifiers; | 13795 | const u64 *modifiers; |
13796 | const u32 *formats; | ||
13797 | int num_formats; | ||
13739 | int ret; | 13798 | int ret; |
13740 | 13799 | ||
13741 | primary = kzalloc(sizeof(*primary), GFP_KERNEL); | 13800 | if (INTEL_GEN(dev_priv) >= 9) |
13742 | if (!primary) { | 13801 | return skl_universal_plane_create(dev_priv, pipe, |
13743 | ret = -ENOMEM; | 13802 | PLANE_PRIMARY); |
13744 | goto fail; | ||
13745 | } | ||
13746 | |||
13747 | state = intel_create_plane_state(&primary->base); | ||
13748 | if (!state) { | ||
13749 | ret = -ENOMEM; | ||
13750 | goto fail; | ||
13751 | } | ||
13752 | 13803 | ||
13753 | primary->base.state = &state->base; | 13804 | plane = intel_plane_alloc(); |
13805 | if (IS_ERR(plane)) | ||
13806 | return plane; | ||
13754 | 13807 | ||
13755 | if (INTEL_GEN(dev_priv) >= 9) | 13808 | plane->pipe = pipe; |
13756 | state->scaler_id = -1; | ||
13757 | primary->pipe = pipe; | ||
13758 | /* | 13809 | /* |
13759 | * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS | 13810 | * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS |
13760 | * port is hooked to pipe B. Hence we want plane A feeding pipe B. | 13811 | * port is hooked to pipe B. Hence we want plane A feeding pipe B. |
13761 | */ | 13812 | */ |
13762 | if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) | 13813 | if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) |
13763 | primary->i9xx_plane = (enum i9xx_plane_id) !pipe; | 13814 | plane->i9xx_plane = (enum i9xx_plane_id) !pipe; |
13764 | else | 13815 | else |
13765 | primary->i9xx_plane = (enum i9xx_plane_id) pipe; | 13816 | plane->i9xx_plane = (enum i9xx_plane_id) pipe; |
13766 | primary->id = PLANE_PRIMARY; | 13817 | plane->id = PLANE_PRIMARY; |
13767 | primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id); | 13818 | plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); |
13768 | 13819 | ||
13769 | if (INTEL_GEN(dev_priv) >= 9) | 13820 | plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane); |
13770 | primary->has_fbc = skl_plane_has_fbc(dev_priv, | 13821 | if (plane->has_fbc) { |
13771 | primary->pipe, | ||
13772 | primary->id); | ||
13773 | else | ||
13774 | primary->has_fbc = i9xx_plane_has_fbc(dev_priv, | ||
13775 | primary->i9xx_plane); | ||
13776 | |||
13777 | if (primary->has_fbc) { | ||
13778 | struct intel_fbc *fbc = &dev_priv->fbc; | 13822 | struct intel_fbc *fbc = &dev_priv->fbc; |
13779 | 13823 | ||
13780 | fbc->possible_framebuffer_bits |= primary->frontbuffer_bit; | 13824 | fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; |
13781 | } | 13825 | } |
13782 | 13826 | ||
13783 | if (INTEL_GEN(dev_priv) >= 9) { | 13827 | if (INTEL_GEN(dev_priv) >= 4) { |
13784 | primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe, | 13828 | formats = i965_primary_formats; |
13785 | PLANE_PRIMARY); | ||
13786 | |||
13787 | if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) { | ||
13788 | intel_primary_formats = skl_pri_planar_formats; | ||
13789 | num_formats = ARRAY_SIZE(skl_pri_planar_formats); | ||
13790 | } else { | ||
13791 | intel_primary_formats = skl_primary_formats; | ||
13792 | num_formats = ARRAY_SIZE(skl_primary_formats); | ||
13793 | } | ||
13794 | |||
13795 | if (primary->has_ccs) | ||
13796 | modifiers = skl_format_modifiers_ccs; | ||
13797 | else | ||
13798 | modifiers = skl_format_modifiers_noccs; | ||
13799 | |||
13800 | primary->max_stride = skl_plane_max_stride; | ||
13801 | primary->update_plane = skl_update_plane; | ||
13802 | primary->disable_plane = skl_disable_plane; | ||
13803 | primary->get_hw_state = skl_plane_get_hw_state; | ||
13804 | primary->check_plane = skl_plane_check; | ||
13805 | |||
13806 | plane_funcs = &skl_plane_funcs; | ||
13807 | } else if (INTEL_GEN(dev_priv) >= 4) { | ||
13808 | intel_primary_formats = i965_primary_formats; | ||
13809 | num_formats = ARRAY_SIZE(i965_primary_formats); | 13829 | num_formats = ARRAY_SIZE(i965_primary_formats); |
13810 | modifiers = i9xx_format_modifiers; | 13830 | modifiers = i9xx_format_modifiers; |
13811 | 13831 | ||
13812 | primary->max_stride = i9xx_plane_max_stride; | 13832 | plane->max_stride = i9xx_plane_max_stride; |
13813 | primary->update_plane = i9xx_update_plane; | 13833 | plane->update_plane = i9xx_update_plane; |
13814 | primary->disable_plane = i9xx_disable_plane; | 13834 | plane->disable_plane = i9xx_disable_plane; |
13815 | primary->get_hw_state = i9xx_plane_get_hw_state; | 13835 | plane->get_hw_state = i9xx_plane_get_hw_state; |
13816 | primary->check_plane = i9xx_plane_check; | 13836 | plane->check_plane = i9xx_plane_check; |
13817 | 13837 | ||
13818 | plane_funcs = &i965_plane_funcs; | 13838 | plane_funcs = &i965_plane_funcs; |
13819 | } else { | 13839 | } else { |
13820 | intel_primary_formats = i8xx_primary_formats; | 13840 | formats = i8xx_primary_formats; |
13821 | num_formats = ARRAY_SIZE(i8xx_primary_formats); | 13841 | num_formats = ARRAY_SIZE(i8xx_primary_formats); |
13822 | modifiers = i9xx_format_modifiers; | 13842 | modifiers = i9xx_format_modifiers; |
13823 | 13843 | ||
13824 | primary->max_stride = i9xx_plane_max_stride; | 13844 | plane->max_stride = i9xx_plane_max_stride; |
13825 | primary->update_plane = i9xx_update_plane; | 13845 | plane->update_plane = i9xx_update_plane; |
13826 | primary->disable_plane = i9xx_disable_plane; | 13846 | plane->disable_plane = i9xx_disable_plane; |
13827 | primary->get_hw_state = i9xx_plane_get_hw_state; | 13847 | plane->get_hw_state = i9xx_plane_get_hw_state; |
13828 | primary->check_plane = i9xx_plane_check; | 13848 | plane->check_plane = i9xx_plane_check; |
13829 | 13849 | ||
13830 | plane_funcs = &i8xx_plane_funcs; | 13850 | plane_funcs = &i8xx_plane_funcs; |
13831 | } | 13851 | } |
13832 | 13852 | ||
13833 | if (INTEL_GEN(dev_priv) >= 9) | 13853 | possible_crtcs = BIT(pipe); |
13834 | ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, | 13854 | |
13835 | 0, plane_funcs, | 13855 | if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) |
13836 | intel_primary_formats, num_formats, | 13856 | ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, |
13837 | modifiers, | 13857 | possible_crtcs, plane_funcs, |
13838 | DRM_PLANE_TYPE_PRIMARY, | 13858 | formats, num_formats, modifiers, |
13839 | "plane 1%c", pipe_name(pipe)); | ||
13840 | else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) | ||
13841 | ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, | ||
13842 | 0, plane_funcs, | ||
13843 | intel_primary_formats, num_formats, | ||
13844 | modifiers, | ||
13845 | DRM_PLANE_TYPE_PRIMARY, | 13859 | DRM_PLANE_TYPE_PRIMARY, |
13846 | "primary %c", pipe_name(pipe)); | 13860 | "primary %c", pipe_name(pipe)); |
13847 | else | 13861 | else |
13848 | ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, | 13862 | ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, |
13849 | 0, plane_funcs, | 13863 | possible_crtcs, plane_funcs, |
13850 | intel_primary_formats, num_formats, | 13864 | formats, num_formats, modifiers, |
13851 | modifiers, | ||
13852 | DRM_PLANE_TYPE_PRIMARY, | 13865 | DRM_PLANE_TYPE_PRIMARY, |
13853 | "plane %c", | 13866 | "plane %c", |
13854 | plane_name(primary->i9xx_plane)); | 13867 | plane_name(plane->i9xx_plane)); |
13855 | if (ret) | 13868 | if (ret) |
13856 | goto fail; | 13869 | goto fail; |
13857 | 13870 | ||
13858 | if (INTEL_GEN(dev_priv) >= 10) { | 13871 | if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { |
13859 | supported_rotations = | ||
13860 | DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | | ||
13861 | DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 | | ||
13862 | DRM_MODE_REFLECT_X; | ||
13863 | } else if (INTEL_GEN(dev_priv) >= 9) { | ||
13864 | supported_rotations = | ||
13865 | DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | | ||
13866 | DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; | ||
13867 | } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { | ||
13868 | supported_rotations = | 13872 | supported_rotations = |
13869 | DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | | 13873 | DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | |
13870 | DRM_MODE_REFLECT_X; | 13874 | DRM_MODE_REFLECT_X; |
@@ -13876,26 +13880,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) | |||
13876 | } | 13880 | } |
13877 | 13881 | ||
13878 | if (INTEL_GEN(dev_priv) >= 4) | 13882 | if (INTEL_GEN(dev_priv) >= 4) |
13879 | drm_plane_create_rotation_property(&primary->base, | 13883 | drm_plane_create_rotation_property(&plane->base, |
13880 | DRM_MODE_ROTATE_0, | 13884 | DRM_MODE_ROTATE_0, |
13881 | supported_rotations); | 13885 | supported_rotations); |
13882 | 13886 | ||
13883 | if (INTEL_GEN(dev_priv) >= 9) | 13887 | drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); |
13884 | drm_plane_create_color_properties(&primary->base, | ||
13885 | BIT(DRM_COLOR_YCBCR_BT601) | | ||
13886 | BIT(DRM_COLOR_YCBCR_BT709), | ||
13887 | BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | | ||
13888 | BIT(DRM_COLOR_YCBCR_FULL_RANGE), | ||
13889 | DRM_COLOR_YCBCR_BT709, | ||
13890 | DRM_COLOR_YCBCR_LIMITED_RANGE); | ||
13891 | |||
13892 | drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs); | ||
13893 | 13888 | ||
13894 | return primary; | 13889 | return plane; |
13895 | 13890 | ||
13896 | fail: | 13891 | fail: |
13897 | kfree(state); | 13892 | intel_plane_free(plane); |
13898 | kfree(primary); | ||
13899 | 13893 | ||
13900 | return ERR_PTR(ret); | 13894 | return ERR_PTR(ret); |
13901 | } | 13895 | } |
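intel_primary_plane_create() now hands gen9+ off to skl_universal_plane_create() and, for older hardware, replaces the two kzalloc()/intel_create_plane_state() allocations and their paired kfree() cleanups with a single intel_plane_alloc()/intel_plane_free() pair using the kernel's ERR_PTR convention. That convention, imitated in userspace (the kernel encodes errnos in the top page of the pointer range):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err)      { return (void *)err; }
    static long  PTR_ERR(const void *p) { return (long)p; }
    static int   IS_ERR(const void *p)  { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }

    struct plane { int id; };

    static struct plane *plane_alloc(void)
    {
            struct plane *p = calloc(1, sizeof(*p));

            /* On failure the errno travels inside the returned pointer. */
            return p ? p : ERR_PTR(-ENOMEM);
    }

    int main(void)
    {
            struct plane *p = plane_alloc();

            if (IS_ERR(p)) {
                    fprintf(stderr, "alloc failed: %ld\n", PTR_ERR(p));
                    return 1;
            }
            free(p);
            return 0;
    }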
@@ -13904,23 +13898,13 @@ static struct intel_plane * | |||
13904 | intel_cursor_plane_create(struct drm_i915_private *dev_priv, | 13898 | intel_cursor_plane_create(struct drm_i915_private *dev_priv, |
13905 | enum pipe pipe) | 13899 | enum pipe pipe) |
13906 | { | 13900 | { |
13907 | struct intel_plane *cursor = NULL; | 13901 | unsigned int possible_crtcs; |
13908 | struct intel_plane_state *state = NULL; | 13902 | struct intel_plane *cursor; |
13909 | int ret; | 13903 | int ret; |
13910 | 13904 | ||
13911 | cursor = kzalloc(sizeof(*cursor), GFP_KERNEL); | 13905 | cursor = intel_plane_alloc(); |
13912 | if (!cursor) { | 13906 | if (IS_ERR(cursor)) |
13913 | ret = -ENOMEM; | 13907 | return cursor; |
13914 | goto fail; | ||
13915 | } | ||
13916 | |||
13917 | state = intel_create_plane_state(&cursor->base); | ||
13918 | if (!state) { | ||
13919 | ret = -ENOMEM; | ||
13920 | goto fail; | ||
13921 | } | ||
13922 | |||
13923 | cursor->base.state = &state->base; | ||
13924 | 13908 | ||
13925 | cursor->pipe = pipe; | 13909 | cursor->pipe = pipe; |
13926 | cursor->i9xx_plane = (enum i9xx_plane_id) pipe; | 13910 | cursor->i9xx_plane = (enum i9xx_plane_id) pipe; |
@@ -13947,8 +13931,10 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, | |||
13947 | if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) | 13931 | if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) |
13948 | cursor->cursor.size = ~0; | 13932 | cursor->cursor.size = ~0; |
13949 | 13933 | ||
13934 | possible_crtcs = BIT(pipe); | ||
13935 | |||
13950 | ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, | 13936 | ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, |
13951 | 0, &intel_cursor_plane_funcs, | 13937 | possible_crtcs, &intel_cursor_plane_funcs, |
13952 | intel_cursor_formats, | 13938 | intel_cursor_formats, |
13953 | ARRAY_SIZE(intel_cursor_formats), | 13939 | ARRAY_SIZE(intel_cursor_formats), |
13954 | cursor_format_modifiers, | 13940 | cursor_format_modifiers, |
@@ -13963,16 +13949,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, | |||
13963 | DRM_MODE_ROTATE_0 | | 13949 | DRM_MODE_ROTATE_0 | |
13964 | DRM_MODE_ROTATE_180); | 13950 | DRM_MODE_ROTATE_180); |
13965 | 13951 | ||
13966 | if (INTEL_GEN(dev_priv) >= 9) | ||
13967 | state->scaler_id = -1; | ||
13968 | |||
13969 | drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); | 13952 | drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); |
13970 | 13953 | ||
13971 | return cursor; | 13954 | return cursor; |
13972 | 13955 | ||
13973 | fail: | 13956 | fail: |
13974 | kfree(state); | 13957 | intel_plane_free(cursor); |
13975 | kfree(cursor); | ||
13976 | 13958 | ||
13977 | return ERR_PTR(ret); | 13959 | return ERR_PTR(ret); |
13978 | } | 13960 | } |
@@ -13993,7 +13975,7 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc, | |||
13993 | struct intel_scaler *scaler = &scaler_state->scalers[i]; | 13975 | struct intel_scaler *scaler = &scaler_state->scalers[i]; |
13994 | 13976 | ||
13995 | scaler->in_use = 0; | 13977 | scaler->in_use = 0; |
13996 | scaler->mode = PS_SCALER_MODE_DYN; | 13978 | scaler->mode = 0; |
13997 | } | 13979 | } |
13998 | 13980 | ||
13999 | scaler_state->scaler_id = -1; | 13981 | scaler_state->scaler_id = -1; |
@@ -14088,18 +14070,6 @@ fail: | |||
14088 | return ret; | 14070 | return ret; |
14089 | } | 14071 | } |
14090 | 14072 | ||
14091 | enum pipe intel_get_pipe_from_connector(struct intel_connector *connector) | ||
14092 | { | ||
14093 | struct drm_device *dev = connector->base.dev; | ||
14094 | |||
14095 | WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); | ||
14096 | |||
14097 | if (!connector->base.state->crtc) | ||
14098 | return INVALID_PIPE; | ||
14099 | |||
14100 | return to_intel_crtc(connector->base.state->crtc)->pipe; | ||
14101 | } | ||
14102 | |||
14103 | int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, | 14073 | int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, |
14104 | struct drm_file *file) | 14074 | struct drm_file *file) |
14105 | { | 14075 | { |
@@ -14236,6 +14206,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) | |||
14236 | intel_ddi_init(dev_priv, PORT_D); | 14206 | intel_ddi_init(dev_priv, PORT_D); |
14237 | intel_ddi_init(dev_priv, PORT_E); | 14207 | intel_ddi_init(dev_priv, PORT_E); |
14238 | intel_ddi_init(dev_priv, PORT_F); | 14208 | intel_ddi_init(dev_priv, PORT_F); |
14209 | icl_dsi_init(dev_priv); | ||
14239 | } else if (IS_GEN9_LP(dev_priv)) { | 14210 | } else if (IS_GEN9_LP(dev_priv)) { |
14240 | /* | 14211 | /* |
14241 | * FIXME: Broxton doesn't support port detection via the | 14212 | * FIXME: Broxton doesn't support port detection via the |
@@ -14458,7 +14429,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { | |||
14458 | 14429 | ||
14459 | static | 14430 | static |
14460 | u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv, | 14431 | u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv, |
14461 | uint64_t fb_modifier, uint32_t pixel_format) | 14432 | u32 pixel_format, u64 fb_modifier) |
14462 | { | 14433 | { |
14463 | struct intel_crtc *crtc; | 14434 | struct intel_crtc *crtc; |
14464 | struct intel_plane *plane; | 14435 | struct intel_plane *plane; |
@@ -14526,13 +14497,19 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, | |||
14526 | goto err; | 14497 | goto err; |
14527 | } | 14498 | } |
14528 | /* fall through */ | 14499 | /* fall through */ |
14529 | case I915_FORMAT_MOD_Y_TILED: | ||
14530 | case I915_FORMAT_MOD_Yf_TILED: | 14500 | case I915_FORMAT_MOD_Yf_TILED: |
14501 | if (mode_cmd->pixel_format == DRM_FORMAT_C8) { | ||
14502 | DRM_DEBUG_KMS("Indexed format does not support Yf tiling\n"); | ||
14503 | goto err; | ||
14504 | } | ||
14505 | /* fall through */ | ||
14506 | case I915_FORMAT_MOD_Y_TILED: | ||
14531 | if (INTEL_GEN(dev_priv) < 9) { | 14507 | if (INTEL_GEN(dev_priv) < 9) { |
14532 | DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n", | 14508 | DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n", |
14533 | mode_cmd->modifier[0]); | 14509 | mode_cmd->modifier[0]); |
14534 | goto err; | 14510 | goto err; |
14535 | } | 14511 | } |
14512 | break; | ||
14536 | case DRM_FORMAT_MOD_LINEAR: | 14513 | case DRM_FORMAT_MOD_LINEAR: |
14537 | case I915_FORMAT_MOD_X_TILED: | 14514 | case I915_FORMAT_MOD_X_TILED: |
14538 | break; | 14515 | break; |
@@ -14552,8 +14529,8 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, | |||
14552 | goto err; | 14529 | goto err; |
14553 | } | 14530 | } |
14554 | 14531 | ||
14555 | pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0], | 14532 | pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format, |
14556 | mode_cmd->pixel_format); | 14533 | mode_cmd->modifier[0]); |
14557 | if (mode_cmd->pitches[0] > pitch_limit) { | 14534 | if (mode_cmd->pitches[0] > pitch_limit) { |
14558 | DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n", | 14535 | DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n", |
14559 | mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? | 14536 | mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? |
@@ -14622,7 +14599,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, | |||
14622 | break; | 14599 | break; |
14623 | case DRM_FORMAT_NV12: | 14600 | case DRM_FORMAT_NV12: |
14624 | if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) || | 14601 | if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) || |
14625 | IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) { | 14602 | IS_BROXTON(dev_priv)) { |
14626 | DRM_DEBUG_KMS("unsupported pixel format: %s\n", | 14603 | DRM_DEBUG_KMS("unsupported pixel format: %s\n", |
14627 | drm_get_format_name(mode_cmd->pixel_format, | 14604 | drm_get_format_name(mode_cmd->pixel_format, |
14628 | &format_name)); | 14605 | &format_name)); |
@@ -14646,7 +14623,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, | |||
14646 | fb->height < SKL_MIN_YUV_420_SRC_H || | 14623 | fb->height < SKL_MIN_YUV_420_SRC_H || |
14647 | (fb->width % 4) != 0 || (fb->height % 4) != 0)) { | 14624 | (fb->width % 4) != 0 || (fb->height % 4) != 0)) { |
14648 | DRM_DEBUG_KMS("src dimensions not correct for NV12\n"); | 14625 | DRM_DEBUG_KMS("src dimensions not correct for NV12\n"); |
14649 | return -EINVAL; | 14626 | goto err; |
14650 | } | 14627 | } |
14651 | 14628 | ||
14652 | for (i = 0; i < fb->format->num_planes; i++) { | 14629 | for (i = 0; i < fb->format->num_planes; i++) { |
@@ -14906,174 +14883,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) | |||
14906 | dev_priv->display.update_crtcs = intel_update_crtcs; | 14883 | dev_priv->display.update_crtcs = intel_update_crtcs; |
14907 | } | 14884 | } |
14908 | 14885 | ||
14909 | /* | ||
14910 | * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason | ||
14911 | */ | ||
14912 | static void quirk_ssc_force_disable(struct drm_device *dev) | ||
14913 | { | ||
14914 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
14915 | dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; | ||
14916 | DRM_INFO("applying lvds SSC disable quirk\n"); | ||
14917 | } | ||
14918 | |||
14919 | /* | ||
14920 | * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight | ||
14921 | * brightness value | ||
14922 | */ | ||
14923 | static void quirk_invert_brightness(struct drm_device *dev) | ||
14924 | { | ||
14925 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
14926 | dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; | ||
14927 | DRM_INFO("applying inverted panel brightness quirk\n"); | ||
14928 | } | ||
14929 | |||
14930 | /* Some VBT's incorrectly indicate no backlight is present */ | ||
14931 | static void quirk_backlight_present(struct drm_device *dev) | ||
14932 | { | ||
14933 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
14934 | dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT; | ||
14935 | DRM_INFO("applying backlight present quirk\n"); | ||
14936 | } | ||
14937 | |||
14938 | /* Toshiba Satellite P50-C-18C requires the T12 delay to be a minimum of | ||
14939 | * 800 ms, which is 300 ms greater than the eDP spec T12 minimum. | ||
14940 | */ | ||
14941 | static void quirk_increase_t12_delay(struct drm_device *dev) | ||
14942 | { | ||
14943 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
14944 | |||
14945 | dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY; | ||
14946 | DRM_INFO("Applying T12 delay quirk\n"); | ||
14947 | } | ||
14948 | |||
14949 | /* | ||
14950 | * GeminiLake NUC HDMI outputs require additional off time; | ||
14951 | * this allows the onboard retimer to correctly sync to the signal. | ||
14952 | */ | ||
14953 | static void quirk_increase_ddi_disabled_time(struct drm_device *dev) | ||
14954 | { | ||
14955 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
14956 | |||
14957 | dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME; | ||
14958 | DRM_INFO("Applying Increase DDI Disabled quirk\n"); | ||
14959 | } | ||
14960 | |||
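Each quirk hook above just ORs a QUIRK_* flag into dev_priv->quirks; the rest of the driver then tests the bit at the point of use. A minimal sketch of such a consumer (the helper name is mine, not from this patch):

static bool intel_lvds_ssc_usable(struct drm_i915_private *dev_priv)
{
	/* Honour the flag set by quirk_ssc_force_disable() above */
	return (dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE) == 0;
}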
14961 | struct intel_quirk { | ||
14962 | int device; | ||
14963 | int subsystem_vendor; | ||
14964 | int subsystem_device; | ||
14965 | void (*hook)(struct drm_device *dev); | ||
14966 | }; | ||
14967 | |||
14968 | /* For systems that don't have a meaningful PCI subdevice/subvendor ID */ | ||
14969 | struct intel_dmi_quirk { | ||
14970 | void (*hook)(struct drm_device *dev); | ||
14971 | const struct dmi_system_id (*dmi_id_list)[]; | ||
14972 | }; | ||
14973 | |||
14974 | static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) | ||
14975 | { | ||
14976 | DRM_INFO("Backlight polarity reversed on %s\n", id->ident); | ||
14977 | return 1; | ||
14978 | } | ||
14979 | |||
14980 | static const struct intel_dmi_quirk intel_dmi_quirks[] = { | ||
14981 | { | ||
14982 | .dmi_id_list = &(const struct dmi_system_id[]) { | ||
14983 | { | ||
14984 | .callback = intel_dmi_reverse_brightness, | ||
14985 | .ident = "NCR Corporation", | ||
14986 | .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), | ||
14987 | DMI_MATCH(DMI_PRODUCT_NAME, ""), | ||
14988 | }, | ||
14989 | }, | ||
14990 | { } /* terminating entry */ | ||
14991 | }, | ||
14992 | .hook = quirk_invert_brightness, | ||
14993 | }, | ||
14994 | }; | ||
14995 | |||
14996 | static struct intel_quirk intel_quirks[] = { | ||
14997 | /* Lenovo U160 cannot use SSC on LVDS */ | ||
14998 | { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, | ||
14999 | |||
15000 | /* Sony Vaio Y cannot use SSC on LVDS */ | ||
15001 | { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, | ||
15002 | |||
15003 | /* Acer Aspire 5734Z must invert backlight brightness */ | ||
15004 | { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, | ||
15005 | |||
15006 | /* Acer/eMachines G725 */ | ||
15007 | { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, | ||
15008 | |||
15009 | /* Acer/eMachines e725 */ | ||
15010 | { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, | ||
15011 | |||
15012 | /* Acer/Packard Bell NCL20 */ | ||
15013 | { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, | ||
15014 | |||
15015 | /* Acer Aspire 4736Z */ | ||
15016 | { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, | ||
15017 | |||
15018 | /* Acer Aspire 5336 */ | ||
15019 | { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness }, | ||
15020 | |||
15021 | /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */ | ||
15022 | { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present }, | ||
15023 | |||
15024 | /* Acer C720 Chromebook (Core i3 4005U) */ | ||
15025 | { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present }, | ||
15026 | |||
15027 | /* Apple Macbook 2,1 (Core 2 T7400) */ | ||
15028 | { 0x27a2, 0x8086, 0x7270, quirk_backlight_present }, | ||
15029 | |||
15030 | /* Apple Macbook 4,1 */ | ||
15031 | { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present }, | ||
15032 | |||
15033 | /* Toshiba CB35 Chromebook (Celeron 2955U) */ | ||
15034 | { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, | ||
15035 | |||
15036 | /* HP Chromebook 14 (Celeron 2955U) */ | ||
15037 | { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present }, | ||
15038 | |||
15039 | /* Dell Chromebook 11 */ | ||
15040 | { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present }, | ||
15041 | |||
15042 | /* Dell Chromebook 11 (2015 version) */ | ||
15043 | { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present }, | ||
15044 | |||
15045 | /* Toshiba Satellite P50-C-18C */ | ||
15046 | { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay }, | ||
15047 | |||
15048 | /* GeminiLake NUC */ | ||
15049 | { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time }, | ||
15050 | { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time }, | ||
15051 | /* ASRock ITX */ | ||
15052 | { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, | ||
15053 | { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, | ||
15054 | }; | ||
15055 | |||
15056 | static void intel_init_quirks(struct drm_device *dev) | ||
15057 | { | ||
15058 | struct pci_dev *d = dev->pdev; | ||
15059 | int i; | ||
15060 | |||
15061 | for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { | ||
15062 | struct intel_quirk *q = &intel_quirks[i]; | ||
15063 | |||
15064 | if (d->device == q->device && | ||
15065 | (d->subsystem_vendor == q->subsystem_vendor || | ||
15066 | q->subsystem_vendor == PCI_ANY_ID) && | ||
15067 | (d->subsystem_device == q->subsystem_device || | ||
15068 | q->subsystem_device == PCI_ANY_ID)) | ||
15069 | q->hook(dev); | ||
15070 | } | ||
15071 | for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { | ||
15072 | if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) | ||
15073 | intel_dmi_quirks[i].hook(dev); | ||
15074 | } | ||
15075 | } | ||
15076 | |||
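For reference, a quirk fires when the PCI device ID matches and each subsystem field matches either exactly or via the PCI_ANY_ID wildcard, so one table entry can cover every subsystem variant of a device. A hypothetical entry (the IDs are made up, reusing an existing hook):

static struct intel_quirk example_quirks[] = {
	/* Hypothetical: all subsystem variants of device 0x1234 */
	{ 0x1234, PCI_ANY_ID, PCI_ANY_ID, quirk_backlight_present },
};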
15077 | /* Disable the VGA plane that we never use */ | 14886 | /* Disable the VGA plane that we never use */ |
15078 | static void i915_disable_vga(struct drm_i915_private *dev_priv) | 14887 | static void i915_disable_vga(struct drm_i915_private *dev_priv) |
15079 | { | 14888 | { |
@@ -15233,6 +15042,14 @@ retry: | |||
15233 | ret = drm_atomic_add_affected_planes(state, crtc); | 15042 | ret = drm_atomic_add_affected_planes(state, crtc); |
15234 | if (ret) | 15043 | if (ret) |
15235 | goto out; | 15044 | goto out; |
15045 | |||
15046 | /* | ||
15047 | * FIXME hack to force a LUT update to avoid the | ||
15048 | * plane update forcing the pipe gamma on without | ||
15049 | * having a proper LUT loaded. Remove once we | ||
15050 | * have readout for pipe gamma enable. | ||
15051 | */ | ||
15052 | crtc_state->color_mgmt_changed = true; | ||
15236 | } | 15053 | } |
15237 | } | 15054 | } |
15238 | 15055 | ||
@@ -15279,7 +15096,9 @@ int intel_modeset_init(struct drm_device *dev) | |||
15279 | INIT_WORK(&dev_priv->atomic_helper.free_work, | 15096 | INIT_WORK(&dev_priv->atomic_helper.free_work, |
15280 | intel_atomic_helper_free_state_worker); | 15097 | intel_atomic_helper_free_state_worker); |
15281 | 15098 | ||
15282 | intel_init_quirks(dev); | 15099 | intel_init_quirks(dev_priv); |
15100 | |||
15101 | intel_fbc_init(dev_priv); | ||
15283 | 15102 | ||
15284 | intel_init_pm(dev_priv); | 15103 | intel_init_pm(dev_priv); |
15285 | 15104 | ||
@@ -15511,8 +15330,8 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) | |||
15511 | if (pipe == crtc->pipe) | 15330 | if (pipe == crtc->pipe) |
15512 | continue; | 15331 | continue; |
15513 | 15332 | ||
15514 | DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n", | 15333 | DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n", |
15515 | plane->base.name); | 15334 | plane->base.base.id, plane->base.name); |
15516 | 15335 | ||
15517 | plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe); | 15336 | plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe); |
15518 | intel_plane_disable_noatomic(plane_crtc, plane); | 15337 | intel_plane_disable_noatomic(plane_crtc, plane); |
@@ -15553,7 +15372,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, | |||
15553 | { | 15372 | { |
15554 | struct drm_device *dev = crtc->base.dev; | 15373 | struct drm_device *dev = crtc->base.dev; |
15555 | struct drm_i915_private *dev_priv = to_i915(dev); | 15374 | struct drm_i915_private *dev_priv = to_i915(dev); |
15556 | enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; | 15375 | struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); |
15376 | enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; | ||
15557 | 15377 | ||
15558 | /* Clear any frame start delays used for debugging left by the BIOS */ | 15378 | /* Clear any frame start delays used for debugging left by the BIOS */ |
15559 | if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) { | 15379 | if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) { |
@@ -15563,7 +15383,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, | |||
15563 | I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); | 15383 | I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); |
15564 | } | 15384 | } |
15565 | 15385 | ||
15566 | if (crtc->active) { | 15386 | if (crtc_state->base.active) { |
15567 | struct intel_plane *plane; | 15387 | struct intel_plane *plane; |
15568 | 15388 | ||
15569 | /* Disable everything but the primary plane */ | 15389 | /* Disable everything but the primary plane */ |
@@ -15579,10 +15399,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, | |||
15579 | 15399 | ||
15580 | /* Adjust the state of the output pipe according to whether we | 15400 | /* Adjust the state of the output pipe according to whether we |
15581 | * have active connectors/encoders. */ | 15401 | * have active connectors/encoders. */ |
15582 | if (crtc->active && !intel_crtc_has_encoders(crtc)) | 15402 | if (crtc_state->base.active && !intel_crtc_has_encoders(crtc)) |
15583 | intel_crtc_disable_noatomic(&crtc->base, ctx); | 15403 | intel_crtc_disable_noatomic(&crtc->base, ctx); |
15584 | 15404 | ||
15585 | if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) { | 15405 | if (crtc_state->base.active || HAS_GMCH_DISPLAY(dev_priv)) { |
15586 | /* | 15406 | /* |
15587 | * We start out with underrun reporting disabled to avoid races. | 15407 | * We start out with underrun reporting disabled to avoid races. |
15588 | * For correct bookkeeping mark this on active crtcs. | 15408 | * For correct bookkeeping mark this on active crtcs. |
@@ -15613,6 +15433,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, | |||
15613 | 15433 | ||
15614 | static void intel_sanitize_encoder(struct intel_encoder *encoder) | 15434 | static void intel_sanitize_encoder(struct intel_encoder *encoder) |
15615 | { | 15435 | { |
15436 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
15616 | struct intel_connector *connector; | 15437 | struct intel_connector *connector; |
15617 | 15438 | ||
15618 | /* We need to check both for a crtc link (meaning that the | 15439 | /* We need to check both for a crtc link (meaning that the |
@@ -15636,7 +15457,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) | |||
15636 | DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", | 15457 | DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", |
15637 | encoder->base.base.id, | 15458 | encoder->base.base.id, |
15638 | encoder->base.name); | 15459 | encoder->base.name); |
15639 | encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); | 15460 | if (encoder->disable) |
15461 | encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); | ||
15640 | if (encoder->post_disable) | 15462 | if (encoder->post_disable) |
15641 | encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); | 15463 | encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); |
15642 | } | 15464 | } |
@@ -15653,6 +15475,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) | |||
15653 | 15475 | ||
15654 | /* notify opregion of the sanitized encoder state */ | 15476 | /* notify opregion of the sanitized encoder state */ |
15655 | intel_opregion_notify_encoder(encoder, connector && has_active_crtc); | 15477 | intel_opregion_notify_encoder(encoder, connector && has_active_crtc); |
15478 | |||
15479 | if (INTEL_GEN(dev_priv) >= 11) | ||
15480 | icl_sanitize_encoder_pll_mapping(encoder); | ||
15656 | } | 15481 | } |
15657 | 15482 | ||
15658 | void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv) | 15483 | void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv) |
@@ -15701,6 +15526,10 @@ static void readout_plane_state(struct drm_i915_private *dev_priv) | |||
15701 | crtc_state = to_intel_crtc_state(crtc->base.state); | 15526 | crtc_state = to_intel_crtc_state(crtc->base.state); |
15702 | 15527 | ||
15703 | intel_set_plane_visible(crtc_state, plane_state, visible); | 15528 | intel_set_plane_visible(crtc_state, plane_state, visible); |
15529 | |||
15530 | DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n", | ||
15531 | plane->base.base.id, plane->base.name, | ||
15532 | enableddisabled(visible), pipe_name(pipe)); | ||
15704 | } | 15533 | } |
15705 | 15534 | ||
15706 | for_each_intel_crtc(&dev_priv->drm, crtc) { | 15535 | for_each_intel_crtc(&dev_priv->drm, crtc) { |
@@ -15853,7 +15682,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) | |||
15853 | 15682 | ||
15854 | drm_calc_timestamping_constants(&crtc->base, | 15683 | drm_calc_timestamping_constants(&crtc->base, |
15855 | &crtc_state->base.adjusted_mode); | 15684 | &crtc_state->base.adjusted_mode); |
15856 | update_scanline_offset(crtc); | 15685 | update_scanline_offset(crtc_state); |
15857 | } | 15686 | } |
15858 | 15687 | ||
15859 | dev_priv->min_cdclk[crtc->pipe] = min_cdclk; | 15688 | dev_priv->min_cdclk[crtc->pipe] = min_cdclk; |
@@ -15908,6 +15737,65 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv) | |||
15908 | } | 15737 | } |
15909 | } | 15738 | } |
15910 | 15739 | ||
15740 | static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv, | ||
15741 | enum port port, i915_reg_t hdmi_reg) | ||
15742 | { | ||
15743 | u32 val = I915_READ(hdmi_reg); | ||
15744 | |||
15745 | if (val & SDVO_ENABLE || | ||
15746 | (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A)) | ||
15747 | return; | ||
15748 | |||
15749 | DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n", | ||
15750 | port_name(port)); | ||
15751 | |||
15752 | val &= ~SDVO_PIPE_SEL_MASK; | ||
15753 | val |= SDVO_PIPE_SEL(PIPE_A); | ||
15754 | |||
15755 | I915_WRITE(hdmi_reg, val); | ||
15756 | } | ||
15757 | |||
15758 | static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv, | ||
15759 | enum port port, i915_reg_t dp_reg) | ||
15760 | { | ||
15761 | u32 val = I915_READ(dp_reg); | ||
15762 | |||
15763 | if (val & DP_PORT_EN || | ||
15764 | (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A)) | ||
15765 | return; | ||
15766 | |||
15767 | DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n", | ||
15768 | port_name(port)); | ||
15769 | |||
15770 | val &= ~DP_PIPE_SEL_MASK; | ||
15771 | val |= DP_PIPE_SEL(PIPE_A); | ||
15772 | |||
15773 | I915_WRITE(dp_reg, val); | ||
15774 | } | ||
15775 | |||
15776 | static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv) | ||
15777 | { | ||
15778 | /* | ||
15779 | * The BIOS may select transcoder B on some of the PCH | ||
15780 | * ports even if it doesn't enable the port. This would trip | ||
15781 | * assert_pch_dp_disabled() and assert_pch_hdmi_disabled(). | ||
15782 | * Sanitize the transcoder select bits to prevent that. We | ||
15783 | * assume that the BIOS never actually enabled the port, | ||
15784 | * because if it did we'd actually have to toggle the port | ||
15785 | * on and back off to make the transcoder A select stick | ||
15786 | * (see intel_dp_link_down(), intel_disable_hdmi(), | ||
15787 | * intel_disable_sdvo()). | ||
15788 | */ | ||
15789 | ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B); | ||
15790 | ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C); | ||
15791 | ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D); | ||
15792 | |||
15793 | /* PCH SDVOB multiplex with HDMIB */ | ||
15794 | ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB); | ||
15795 | ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC); | ||
15796 | ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID); | ||
15797 | } | ||
15798 | |||
15911 | /* Scan out the current hw modeset state, | 15799 | /* Scan out the current hw modeset state, |
15912 | * and sanitize it to the current state | 15800 | * and sanitize it to the current state |
15913 | */ | 15801 | */ |
@@ -15917,6 +15805,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, | |||
15917 | { | 15805 | { |
15918 | struct drm_i915_private *dev_priv = to_i915(dev); | 15806 | struct drm_i915_private *dev_priv = to_i915(dev); |
15919 | struct intel_crtc *crtc; | 15807 | struct intel_crtc *crtc; |
15808 | struct intel_crtc_state *crtc_state; | ||
15920 | struct intel_encoder *encoder; | 15809 | struct intel_encoder *encoder; |
15921 | int i; | 15810 | int i; |
15922 | 15811 | ||
@@ -15928,6 +15817,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev, | |||
15928 | /* HW state is read out, now we need to sanitize this mess. */ | 15817 | /* HW state is read out, now we need to sanitize this mess. */ |
15929 | get_encoder_power_domains(dev_priv); | 15818 | get_encoder_power_domains(dev_priv); |
15930 | 15819 | ||
15820 | if (HAS_PCH_IBX(dev_priv)) | ||
15821 | ibx_sanitize_pch_ports(dev_priv); | ||
15822 | |||
15931 | /* | 15823 | /* |
15932 | * intel_sanitize_plane_mapping() may need to do vblank | 15824 | * intel_sanitize_plane_mapping() may need to do vblank |
15933 | * waits, so we need vblank interrupts restored beforehand. | 15825 | * waits, so we need vblank interrupts restored beforehand. |
@@ -15935,7 +15827,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, | |||
15935 | for_each_intel_crtc(&dev_priv->drm, crtc) { | 15827 | for_each_intel_crtc(&dev_priv->drm, crtc) { |
15936 | drm_crtc_vblank_reset(&crtc->base); | 15828 | drm_crtc_vblank_reset(&crtc->base); |
15937 | 15829 | ||
15938 | if (crtc->active) | 15830 | if (crtc->base.state->active) |
15939 | drm_crtc_vblank_on(&crtc->base); | 15831 | drm_crtc_vblank_on(&crtc->base); |
15940 | } | 15832 | } |
15941 | 15833 | ||
@@ -15945,8 +15837,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev, | |||
15945 | intel_sanitize_encoder(encoder); | 15837 | intel_sanitize_encoder(encoder); |
15946 | 15838 | ||
15947 | for_each_intel_crtc(&dev_priv->drm, crtc) { | 15839 | for_each_intel_crtc(&dev_priv->drm, crtc) { |
15840 | crtc_state = to_intel_crtc_state(crtc->base.state); | ||
15948 | intel_sanitize_crtc(crtc, ctx); | 15841 | intel_sanitize_crtc(crtc, ctx); |
15949 | intel_dump_pipe_config(crtc, crtc->config, | 15842 | intel_dump_pipe_config(crtc, crtc_state, |
15950 | "[setup_hw_state]"); | 15843 | "[setup_hw_state]"); |
15951 | } | 15844 | } |
15952 | 15845 | ||
@@ -15980,7 +15873,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev, | |||
15980 | for_each_intel_crtc(dev, crtc) { | 15873 | for_each_intel_crtc(dev, crtc) { |
15981 | u64 put_domains; | 15874 | u64 put_domains; |
15982 | 15875 | ||
15983 | put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config); | 15876 | crtc_state = to_intel_crtc_state(crtc->base.state); |
15877 | put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state); | ||
15984 | if (WARN_ON(put_domains)) | 15878 | if (WARN_ON(put_domains)) |
15985 | modeset_put_power_domains(dev_priv, put_domains); | 15879 | modeset_put_power_domains(dev_priv, put_domains); |
15986 | } | 15880 | } |
@@ -16024,29 +15918,6 @@ void intel_display_resume(struct drm_device *dev) | |||
16024 | drm_atomic_state_put(state); | 15918 | drm_atomic_state_put(state); |
16025 | } | 15919 | } |
16026 | 15920 | ||
16027 | int intel_connector_register(struct drm_connector *connector) | ||
16028 | { | ||
16029 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
16030 | int ret; | ||
16031 | |||
16032 | ret = intel_backlight_device_register(intel_connector); | ||
16033 | if (ret) | ||
16034 | goto err; | ||
16035 | |||
16036 | return 0; | ||
16037 | |||
16038 | err: | ||
16039 | return ret; | ||
16040 | } | ||
16041 | |||
16042 | void intel_connector_unregister(struct drm_connector *connector) | ||
16043 | { | ||
16044 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
16045 | |||
16046 | intel_backlight_device_unregister(intel_connector); | ||
16047 | intel_panel_destroy_backlight(connector); | ||
16048 | } | ||
16049 | |||
16050 | static void intel_hpd_poll_fini(struct drm_device *dev) | 15921 | static void intel_hpd_poll_fini(struct drm_device *dev) |
16051 | { | 15922 | { |
16052 | struct intel_connector *connector; | 15923 | struct intel_connector *connector; |
@@ -16057,9 +15928,9 @@ static void intel_hpd_poll_fini(struct drm_device *dev) | |||
16057 | for_each_intel_connector_iter(connector, &conn_iter) { | 15928 | for_each_intel_connector_iter(connector, &conn_iter) { |
16058 | if (connector->modeset_retry_work.func) | 15929 | if (connector->modeset_retry_work.func) |
16059 | cancel_work_sync(&connector->modeset_retry_work); | 15930 | cancel_work_sync(&connector->modeset_retry_work); |
16060 | if (connector->hdcp_shim) { | 15931 | if (connector->hdcp.shim) { |
16061 | cancel_delayed_work_sync(&connector->hdcp_check_work); | 15932 | cancel_delayed_work_sync(&connector->hdcp.check_work); |
16062 | cancel_work_sync(&connector->hdcp_prop_work); | 15933 | cancel_work_sync(&connector->hdcp.prop_work); |
16063 | } | 15934 | } |
16064 | } | 15935 | } |
16065 | drm_connector_list_iter_end(&conn_iter); | 15936 | drm_connector_list_iter_end(&conn_iter); |
@@ -16099,18 +15970,13 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
16099 | 15970 | ||
16100 | drm_mode_config_cleanup(dev); | 15971 | drm_mode_config_cleanup(dev); |
16101 | 15972 | ||
16102 | intel_cleanup_overlay(dev_priv); | 15973 | intel_overlay_cleanup(dev_priv); |
16103 | 15974 | ||
16104 | intel_teardown_gmbus(dev_priv); | 15975 | intel_teardown_gmbus(dev_priv); |
16105 | 15976 | ||
16106 | destroy_workqueue(dev_priv->modeset_wq); | 15977 | destroy_workqueue(dev_priv->modeset_wq); |
16107 | } | ||
16108 | 15978 | ||
16109 | void intel_connector_attach_encoder(struct intel_connector *connector, | 15979 | intel_fbc_cleanup_cfb(dev_priv); |
16110 | struct intel_encoder *encoder) | ||
16111 | { | ||
16112 | connector->encoder = encoder; | ||
16113 | drm_connector_attach_encoder(&connector->base, &encoder->base); | ||
16114 | } | 15980 | } |
16115 | 15981 | ||
16116 | /* | 15982 | /* |
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h index 9fac67e31205..5f2955b944da 100644 --- a/drivers/gpu/drm/i915/intel_display.h +++ b/drivers/gpu/drm/i915/intel_display.h | |||
@@ -43,6 +43,11 @@ enum i915_gpio { | |||
43 | GPIOM, | 43 | GPIOM, |
44 | }; | 44 | }; |
45 | 45 | ||
46 | /* | ||
47 | * Keep the pipe enum values fixed: the code assumes that PIPE_A=0, the | ||
48 | * rest have consecutive values and match the enum values of transcoders | ||
49 | * with a 1:1 transcoder -> pipe mapping. | ||
50 | */ | ||
46 | enum pipe { | 51 | enum pipe { |
47 | INVALID_PIPE = -1, | 52 | INVALID_PIPE = -1, |
48 | 53 | ||
@@ -57,12 +62,25 @@ enum pipe { | |||
57 | #define pipe_name(p) ((p) + 'A') | 62 | #define pipe_name(p) ((p) + 'A') |
58 | 63 | ||
59 | enum transcoder { | 64 | enum transcoder { |
60 | TRANSCODER_A = 0, | 65 | /* |
61 | TRANSCODER_B, | 66 | * The following transcoders have a 1:1 transcoder -> pipe mapping, |
62 | TRANSCODER_C, | 67 | * keep their values fixed: the code assumes that TRANSCODER_A=0, the |
68 | * rest have consecutive values and match the enum values of the pipes | ||
69 | * they map to. | ||
70 | */ | ||
71 | TRANSCODER_A = PIPE_A, | ||
72 | TRANSCODER_B = PIPE_B, | ||
73 | TRANSCODER_C = PIPE_C, | ||
74 | |||
75 | /* | ||
76 | * The following transcoders can map to any pipe, their enum value | ||
77 | * doesn't need to stay fixed. | ||
78 | */ | ||
63 | TRANSCODER_EDP, | 79 | TRANSCODER_EDP, |
64 | TRANSCODER_DSI_A, | 80 | TRANSCODER_DSI_0, |
65 | TRANSCODER_DSI_C, | 81 | TRANSCODER_DSI_1, |
82 | TRANSCODER_DSI_A = TRANSCODER_DSI_0, /* legacy DSI */ | ||
83 | TRANSCODER_DSI_C = TRANSCODER_DSI_1, /* legacy DSI */ | ||
66 | 84 | ||
67 | I915_MAX_TRANSCODERS | 85 | I915_MAX_TRANSCODERS |
68 | }; | 86 | }; |
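Because the comments above pin TRANSCODER_A..TRANSCODER_C to the same values as PIPE_A..PIPE_C, the 1:1 mapping reduces to a cast. An illustrative helper (mine, not part of the patch):

static inline enum transcoder pipe_to_1to1_transcoder(enum pipe pipe)
{
	/* Only valid for pipes with a 1:1 transcoder, per the comment above */
	return (enum transcoder)pipe;
}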
@@ -120,6 +138,9 @@ enum plane_id { | |||
120 | PLANE_SPRITE0, | 138 | PLANE_SPRITE0, |
121 | PLANE_SPRITE1, | 139 | PLANE_SPRITE1, |
122 | PLANE_SPRITE2, | 140 | PLANE_SPRITE2, |
141 | PLANE_SPRITE3, | ||
142 | PLANE_SPRITE4, | ||
143 | PLANE_SPRITE5, | ||
123 | PLANE_CURSOR, | 144 | PLANE_CURSOR, |
124 | 145 | ||
125 | I915_MAX_PLANES, | 146 | I915_MAX_PLANES, |
@@ -363,7 +384,7 @@ struct intel_link_m_n { | |||
363 | (__dev_priv)->power_domains.power_well_count; \ | 384 | (__dev_priv)->power_domains.power_well_count; \ |
364 | (__power_well)++) | 385 | (__power_well)++) |
365 | 386 | ||
366 | #define for_each_power_well_rev(__dev_priv, __power_well) \ | 387 | #define for_each_power_well_reverse(__dev_priv, __power_well) \ |
367 | for ((__power_well) = (__dev_priv)->power_domains.power_wells + \ | 388 | for ((__power_well) = (__dev_priv)->power_domains.power_wells + \ |
368 | (__dev_priv)->power_domains.power_well_count - 1; \ | 389 | (__dev_priv)->power_domains.power_well_count - 1; \ |
369 | (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \ | 390 | (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \ |
@@ -373,8 +394,8 @@ struct intel_link_m_n { | |||
373 | for_each_power_well(__dev_priv, __power_well) \ | 394 | for_each_power_well(__dev_priv, __power_well) \ |
374 | for_each_if((__power_well)->desc->domains & (__domain_mask)) | 395 | for_each_if((__power_well)->desc->domains & (__domain_mask)) |
375 | 396 | ||
376 | #define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \ | 397 | #define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \ |
377 | for_each_power_well_rev(__dev_priv, __power_well) \ | 398 | for_each_power_well_reverse(__dev_priv, __power_well) \ |
378 | for_each_if((__power_well)->desc->domains & (__domain_mask)) | 399 | for_each_if((__power_well)->desc->domains & (__domain_mask)) |
379 | 400 | ||
380 | #define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \ | 401 | #define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \ |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 13f9b56a9ce7..7699f9b7b2d2 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -45,6 +45,17 @@ | |||
45 | 45 | ||
46 | #define DP_DPRX_ESI_LEN 14 | 46 | #define DP_DPRX_ESI_LEN 14 |
47 | 47 | ||
48 | /* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */ | ||
49 | #define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440 | ||
50 | |||
51 | /* DP DSC throughput values used for slice count calculations, in KPixels/s */ | ||
52 | #define DP_DSC_PEAK_PIXEL_RATE 2720000 | ||
53 | #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 | ||
54 | #define DP_DSC_MAX_ENC_THROUGHPUT_1 400000 | ||
55 | |||
56 | /* DP DSC FEC Overhead factor = (100 - 2.4)/100 */ | ||
57 | #define DP_DSC_FEC_OVERHEAD_FACTOR 976 | ||
58 | |||
48 | /* Compliance test status bits */ | 59 | /* Compliance test status bits */ |
49 | #define INTEL_DP_RESOLUTION_SHIFT_MASK 0 | 60 | #define INTEL_DP_RESOLUTION_SHIFT_MASK 0 |
50 | #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK) | 61 | #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK) |
@@ -93,6 +104,14 @@ static const struct dp_link_dpll chv_dpll[] = { | |||
93 | { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }, | 104 | { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }, |
94 | }; | 105 | }; |
95 | 106 | ||
107 | /* Constants for DP DSC configurations */ | ||
108 | static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15}; | ||
109 | |||
110 | /* With a single pipe configuration, the HW is capable of supporting a | ||
111 | * maximum of 4 slices per line. | ||
112 | */ | ||
113 | static const u8 valid_dsc_slicecount[] = {1, 2, 4}; | ||
114 | |||
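These tables would typically be scanned for the smallest slice count that keeps the per-slice pixel rate within the encoder's throughput limit; a sketch under that assumption (helper and bound are mine):

static u8 pick_dsc_slice_count(int pixel_rate_kpps, int max_slice_throughput)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
		/* More slices -> lower per-slice rate; take the first fit */
		if (pixel_rate_kpps / valid_dsc_slicecount[i] <=
		    max_slice_throughput)
			return valid_dsc_slicecount[i];
	}

	return 0; /* no valid slice configuration */
}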
96 | /** | 115 | /** |
97 | * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH) | 116 | * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH) |
98 | * @intel_dp: DP struct | 117 | * @intel_dp: DP struct |
@@ -222,138 +241,6 @@ intel_dp_link_required(int pixel_clock, int bpp) | |||
222 | return DIV_ROUND_UP(pixel_clock * bpp, 8); | 241 | return DIV_ROUND_UP(pixel_clock * bpp, 8); |
223 | } | 242 | } |
224 | 243 | ||
225 | void icl_program_mg_dp_mode(struct intel_dp *intel_dp) | ||
226 | { | ||
227 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | ||
228 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | ||
229 | enum port port = intel_dig_port->base.port; | ||
230 | enum tc_port tc_port = intel_port_to_tc(dev_priv, port); | ||
231 | u32 ln0, ln1, lane_info; | ||
232 | |||
233 | if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT) | ||
234 | return; | ||
235 | |||
236 | ln0 = I915_READ(MG_DP_MODE(port, 0)); | ||
237 | ln1 = I915_READ(MG_DP_MODE(port, 1)); | ||
238 | |||
239 | switch (intel_dig_port->tc_type) { | ||
240 | case TC_PORT_TYPEC: | ||
241 | ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); | ||
242 | ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); | ||
243 | |||
244 | lane_info = (I915_READ(PORT_TX_DFLEXDPSP) & | ||
245 | DP_LANE_ASSIGNMENT_MASK(tc_port)) >> | ||
246 | DP_LANE_ASSIGNMENT_SHIFT(tc_port); | ||
247 | |||
248 | switch (lane_info) { | ||
249 | case 0x1: | ||
250 | case 0x4: | ||
251 | break; | ||
252 | case 0x2: | ||
253 | ln0 |= MG_DP_MODE_CFG_DP_X1_MODE; | ||
254 | break; | ||
255 | case 0x3: | ||
256 | ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | | ||
257 | MG_DP_MODE_CFG_DP_X2_MODE; | ||
258 | break; | ||
259 | case 0x8: | ||
260 | ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; | ||
261 | break; | ||
262 | case 0xC: | ||
263 | ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | | ||
264 | MG_DP_MODE_CFG_DP_X2_MODE; | ||
265 | break; | ||
266 | case 0xF: | ||
267 | ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | | ||
268 | MG_DP_MODE_CFG_DP_X2_MODE; | ||
269 | ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | | ||
270 | MG_DP_MODE_CFG_DP_X2_MODE; | ||
271 | break; | ||
272 | default: | ||
273 | MISSING_CASE(lane_info); | ||
274 | } | ||
275 | break; | ||
276 | |||
277 | case TC_PORT_LEGACY: | ||
278 | ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE; | ||
279 | ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE; | ||
280 | break; | ||
281 | |||
282 | default: | ||
283 | MISSING_CASE(intel_dig_port->tc_type); | ||
284 | return; | ||
285 | } | ||
286 | |||
287 | I915_WRITE(MG_DP_MODE(port, 0), ln0); | ||
288 | I915_WRITE(MG_DP_MODE(port, 1), ln1); | ||
289 | } | ||
290 | |||
291 | void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port) | ||
292 | { | ||
293 | struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); | ||
294 | enum port port = dig_port->base.port; | ||
295 | enum tc_port tc_port = intel_port_to_tc(dev_priv, port); | ||
296 | i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) }; | ||
297 | u32 val; | ||
298 | int i; | ||
299 | |||
300 | if (tc_port == PORT_TC_NONE) | ||
301 | return; | ||
302 | |||
303 | for (i = 0; i < ARRAY_SIZE(mg_regs); i++) { | ||
304 | val = I915_READ(mg_regs[i]); | ||
305 | val |= MG_DP_MODE_CFG_TR2PWR_GATING | | ||
306 | MG_DP_MODE_CFG_TRPWR_GATING | | ||
307 | MG_DP_MODE_CFG_CLNPWR_GATING | | ||
308 | MG_DP_MODE_CFG_DIGPWR_GATING | | ||
309 | MG_DP_MODE_CFG_GAONPWR_GATING; | ||
310 | I915_WRITE(mg_regs[i], val); | ||
311 | } | ||
312 | |||
313 | val = I915_READ(MG_MISC_SUS0(tc_port)); | ||
314 | val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) | | ||
315 | MG_MISC_SUS0_CFG_TR2PWR_GATING | | ||
316 | MG_MISC_SUS0_CFG_CL2PWR_GATING | | ||
317 | MG_MISC_SUS0_CFG_GAONPWR_GATING | | ||
318 | MG_MISC_SUS0_CFG_TRPWR_GATING | | ||
319 | MG_MISC_SUS0_CFG_CL1PWR_GATING | | ||
320 | MG_MISC_SUS0_CFG_DGPWR_GATING; | ||
321 | I915_WRITE(MG_MISC_SUS0(tc_port), val); | ||
322 | } | ||
323 | |||
324 | void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port) | ||
325 | { | ||
326 | struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); | ||
327 | enum port port = dig_port->base.port; | ||
328 | enum tc_port tc_port = intel_port_to_tc(dev_priv, port); | ||
329 | i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) }; | ||
330 | u32 val; | ||
331 | int i; | ||
332 | |||
333 | if (tc_port == PORT_TC_NONE) | ||
334 | return; | ||
335 | |||
336 | for (i = 0; i < ARRAY_SIZE(mg_regs); i++) { | ||
337 | val = I915_READ(mg_regs[i]); | ||
338 | val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING | | ||
339 | MG_DP_MODE_CFG_TRPWR_GATING | | ||
340 | MG_DP_MODE_CFG_CLNPWR_GATING | | ||
341 | MG_DP_MODE_CFG_DIGPWR_GATING | | ||
342 | MG_DP_MODE_CFG_GAONPWR_GATING); | ||
343 | I915_WRITE(mg_regs[i], val); | ||
344 | } | ||
345 | |||
346 | val = I915_READ(MG_MISC_SUS0(tc_port)); | ||
347 | val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK | | ||
348 | MG_MISC_SUS0_CFG_TR2PWR_GATING | | ||
349 | MG_MISC_SUS0_CFG_CL2PWR_GATING | | ||
350 | MG_MISC_SUS0_CFG_GAONPWR_GATING | | ||
351 | MG_MISC_SUS0_CFG_TRPWR_GATING | | ||
352 | MG_MISC_SUS0_CFG_CL1PWR_GATING | | ||
353 | MG_MISC_SUS0_CFG_DGPWR_GATING); | ||
354 | I915_WRITE(MG_MISC_SUS0(tc_port), val); | ||
355 | } | ||
356 | |||
357 | int | 244 | int |
358 | intel_dp_max_data_rate(int max_link_clock, int max_lanes) | 245 | intel_dp_max_data_rate(int max_link_clock, int max_lanes) |
359 | { | 246 | { |
@@ -455,7 +342,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp) | |||
455 | if (INTEL_GEN(dev_priv) >= 10) { | 342 | if (INTEL_GEN(dev_priv) >= 10) { |
456 | source_rates = cnl_rates; | 343 | source_rates = cnl_rates; |
457 | size = ARRAY_SIZE(cnl_rates); | 344 | size = ARRAY_SIZE(cnl_rates); |
458 | if (INTEL_GEN(dev_priv) == 10) | 345 | if (IS_GEN10(dev_priv)) |
459 | max_rate = cnl_max_source_rate(intel_dp); | 346 | max_rate = cnl_max_source_rate(intel_dp); |
460 | else | 347 | else |
461 | max_rate = icl_max_source_rate(intel_dp); | 348 | max_rate = icl_max_source_rate(intel_dp); |
@@ -616,9 +503,12 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
616 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 503 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
617 | struct intel_connector *intel_connector = to_intel_connector(connector); | 504 | struct intel_connector *intel_connector = to_intel_connector(connector); |
618 | struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; | 505 | struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; |
506 | struct drm_i915_private *dev_priv = to_i915(connector->dev); | ||
619 | int target_clock = mode->clock; | 507 | int target_clock = mode->clock; |
620 | int max_rate, mode_rate, max_lanes, max_link_clock; | 508 | int max_rate, mode_rate, max_lanes, max_link_clock; |
621 | int max_dotclk; | 509 | int max_dotclk; |
510 | u16 dsc_max_output_bpp = 0; | ||
511 | u8 dsc_slice_count = 0; | ||
622 | 512 | ||
623 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | 513 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
624 | return MODE_NO_DBLESCAN; | 514 | return MODE_NO_DBLESCAN; |
@@ -641,7 +531,33 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
641 | max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); | 531 | max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); |
642 | mode_rate = intel_dp_link_required(target_clock, 18); | 532 | mode_rate = intel_dp_link_required(target_clock, 18); |
643 | 533 | ||
644 | if (mode_rate > max_rate || target_clock > max_dotclk) | 534 | /* |
535 | * Output bpp is stored in 6.4 format so right shift by 4 to get the | ||
536 | * integer value since we support only integer values of bpp. | ||
537 | */ | ||
538 | if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) && | ||
539 | drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) { | ||
540 | if (intel_dp_is_edp(intel_dp)) { | ||
541 | dsc_max_output_bpp = | ||
542 | drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4; | ||
543 | dsc_slice_count = | ||
544 | drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, | ||
545 | true); | ||
546 | } else { | ||
547 | dsc_max_output_bpp = | ||
548 | intel_dp_dsc_get_output_bpp(max_link_clock, | ||
549 | max_lanes, | ||
550 | target_clock, | ||
551 | mode->hdisplay) >> 4; | ||
552 | dsc_slice_count = | ||
553 | intel_dp_dsc_get_slice_count(intel_dp, | ||
554 | target_clock, | ||
555 | mode->hdisplay); | ||
556 | } | ||
557 | } | ||
558 | |||
559 | if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) || | ||
560 | target_clock > max_dotclk) | ||
645 | return MODE_CLOCK_HIGH; | 561 | return MODE_CLOCK_HIGH; |
646 | 562 | ||
647 | if (mode->clock < 10000) | 563 | if (mode->clock < 10000) |
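To make the 6.4 fixed-point handling above concrete (example value is mine): the sink reports bpp with 4 fractional bits, so a raw value of 0x120 (decimal 288) means 288/16 = 18.0 bpp, and the right shift by 4 truncates the fraction:

	u16 raw_bpp_6_4 = 0x120;	/* hypothetical sink value, 18.0 bpp */
	u16 int_bpp = raw_bpp_6_4 >> 4;	/* = 18 */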
@@ -690,7 +606,8 @@ static void pps_lock(struct intel_dp *intel_dp) | |||
690 | * See intel_power_sequencer_reset() why we need | 606 | * See intel_power_sequencer_reset() why we need |
691 | * a power domain reference here. | 607 | * a power domain reference here. |
692 | */ | 608 | */ |
693 | intel_display_power_get(dev_priv, intel_dp->aux_power_domain); | 609 | intel_display_power_get(dev_priv, |
610 | intel_aux_power_domain(dp_to_dig_port(intel_dp))); | ||
694 | 611 | ||
695 | mutex_lock(&dev_priv->pps_mutex); | 612 | mutex_lock(&dev_priv->pps_mutex); |
696 | } | 613 | } |
@@ -701,7 +618,8 @@ static void pps_unlock(struct intel_dp *intel_dp) | |||
701 | 618 | ||
702 | mutex_unlock(&dev_priv->pps_mutex); | 619 | mutex_unlock(&dev_priv->pps_mutex); |
703 | 620 | ||
704 | intel_display_power_put(dev_priv, intel_dp->aux_power_domain); | 621 | intel_display_power_put(dev_priv, |
622 | intel_aux_power_domain(dp_to_dig_port(intel_dp))); | ||
705 | } | 623 | } |
706 | 624 | ||
707 | static void | 625 | static void |
@@ -1156,6 +1074,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) | |||
1156 | static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) | 1074 | static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) |
1157 | { | 1075 | { |
1158 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | 1076 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
1077 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | ||
1159 | 1078 | ||
1160 | if (index) | 1079 | if (index) |
1161 | return 0; | 1080 | return 0; |
@@ -1165,7 +1084,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) | |||
1165 | * like to run at 2MHz. So, take the cdclk or PCH rawclk value and | 1084 | * like to run at 2MHz. So, take the cdclk or PCH rawclk value and |
1166 | * divide by 2000 and use that | 1085 | * divide by 2000 and use that |
1167 | */ | 1086 | */ |
1168 | if (intel_dp->aux_ch == AUX_CH_A) | 1087 | if (dig_port->aux_ch == AUX_CH_A) |
1169 | return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000); | 1088 | return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000); |
1170 | else | 1089 | else |
1171 | return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000); | 1090 | return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000); |
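Worked numbers for the divider above (values mine): with cdclk = 450000 kHz, DIV_ROUND_CLOSEST(450000, 2000) = 225, and 450 MHz / 225 = 2 MHz, the AUX clock the comment asks for; the PCH rawclk path divides the same way.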
@@ -1174,8 +1093,9 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) | |||
1174 | static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) | 1093 | static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) |
1175 | { | 1094 | { |
1176 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | 1095 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
1096 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | ||
1177 | 1097 | ||
1178 | if (intel_dp->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) { | 1098 | if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) { |
1179 | /* Workaround for non-ULT HSW */ | 1099 | /* Workaround for non-ULT HSW */ |
1180 | switch (index) { | 1100 | switch (index) { |
1181 | case 0: return 63; | 1101 | case 0: return 63; |
@@ -1503,80 +1423,12 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) | |||
1503 | return ret; | 1423 | return ret; |
1504 | } | 1424 | } |
1505 | 1425 | ||
1506 | static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp) | ||
1507 | { | ||
1508 | struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; | ||
1509 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | ||
1510 | enum port port = encoder->port; | ||
1511 | const struct ddi_vbt_port_info *info = | ||
1512 | &dev_priv->vbt.ddi_port_info[port]; | ||
1513 | enum aux_ch aux_ch; | ||
1514 | |||
1515 | if (!info->alternate_aux_channel) { | ||
1516 | aux_ch = (enum aux_ch) port; | ||
1517 | |||
1518 | DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n", | ||
1519 | aux_ch_name(aux_ch), port_name(port)); | ||
1520 | return aux_ch; | ||
1521 | } | ||
1522 | |||
1523 | switch (info->alternate_aux_channel) { | ||
1524 | case DP_AUX_A: | ||
1525 | aux_ch = AUX_CH_A; | ||
1526 | break; | ||
1527 | case DP_AUX_B: | ||
1528 | aux_ch = AUX_CH_B; | ||
1529 | break; | ||
1530 | case DP_AUX_C: | ||
1531 | aux_ch = AUX_CH_C; | ||
1532 | break; | ||
1533 | case DP_AUX_D: | ||
1534 | aux_ch = AUX_CH_D; | ||
1535 | break; | ||
1536 | case DP_AUX_E: | ||
1537 | aux_ch = AUX_CH_E; | ||
1538 | break; | ||
1539 | case DP_AUX_F: | ||
1540 | aux_ch = AUX_CH_F; | ||
1541 | break; | ||
1542 | default: | ||
1543 | MISSING_CASE(info->alternate_aux_channel); | ||
1544 | aux_ch = AUX_CH_A; | ||
1545 | break; | ||
1546 | } | ||
1547 | |||
1548 | DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n", | ||
1549 | aux_ch_name(aux_ch), port_name(port)); | ||
1550 | |||
1551 | return aux_ch; | ||
1552 | } | ||
1553 | |||
1554 | static enum intel_display_power_domain | ||
1555 | intel_aux_power_domain(struct intel_dp *intel_dp) | ||
1556 | { | ||
1557 | switch (intel_dp->aux_ch) { | ||
1558 | case AUX_CH_A: | ||
1559 | return POWER_DOMAIN_AUX_A; | ||
1560 | case AUX_CH_B: | ||
1561 | return POWER_DOMAIN_AUX_B; | ||
1562 | case AUX_CH_C: | ||
1563 | return POWER_DOMAIN_AUX_C; | ||
1564 | case AUX_CH_D: | ||
1565 | return POWER_DOMAIN_AUX_D; | ||
1566 | case AUX_CH_E: | ||
1567 | return POWER_DOMAIN_AUX_E; | ||
1568 | case AUX_CH_F: | ||
1569 | return POWER_DOMAIN_AUX_F; | ||
1570 | default: | ||
1571 | MISSING_CASE(intel_dp->aux_ch); | ||
1572 | return POWER_DOMAIN_AUX_A; | ||
1573 | } | ||
1574 | } | ||
1575 | 1426 | ||
1576 | static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) | 1427 | static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) |
1577 | { | 1428 | { |
1578 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | 1429 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
1579 | enum aux_ch aux_ch = intel_dp->aux_ch; | 1430 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
1431 | enum aux_ch aux_ch = dig_port->aux_ch; | ||
1580 | 1432 | ||
1581 | switch (aux_ch) { | 1433 | switch (aux_ch) { |
1582 | case AUX_CH_B: | 1434 | case AUX_CH_B: |
@@ -1592,7 +1444,8 @@ static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp) | |||
1592 | static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) | 1444 | static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) |
1593 | { | 1445 | { |
1594 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | 1446 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
1595 | enum aux_ch aux_ch = intel_dp->aux_ch; | 1447 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
1448 | enum aux_ch aux_ch = dig_port->aux_ch; | ||
1596 | 1449 | ||
1597 | switch (aux_ch) { | 1450 | switch (aux_ch) { |
1598 | case AUX_CH_B: | 1451 | case AUX_CH_B: |
@@ -1608,7 +1461,8 @@ static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index) | |||
1608 | static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) | 1461 | static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) |
1609 | { | 1462 | { |
1610 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | 1463 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
1611 | enum aux_ch aux_ch = intel_dp->aux_ch; | 1464 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
1465 | enum aux_ch aux_ch = dig_port->aux_ch; | ||
1612 | 1466 | ||
1613 | switch (aux_ch) { | 1467 | switch (aux_ch) { |
1614 | case AUX_CH_A: | 1468 | case AUX_CH_A: |
@@ -1626,7 +1480,8 @@ static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp) | |||
1626 | static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) | 1480 | static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) |
1627 | { | 1481 | { |
1628 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | 1482 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
1629 | enum aux_ch aux_ch = intel_dp->aux_ch; | 1483 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
1484 | enum aux_ch aux_ch = dig_port->aux_ch; | ||
1630 | 1485 | ||
1631 | switch (aux_ch) { | 1486 | switch (aux_ch) { |
1632 | case AUX_CH_A: | 1487 | case AUX_CH_A: |
@@ -1644,7 +1499,8 @@ static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index) | |||
1644 | static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) | 1499 | static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) |
1645 | { | 1500 | { |
1646 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | 1501 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
1647 | enum aux_ch aux_ch = intel_dp->aux_ch; | 1502 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
1503 | enum aux_ch aux_ch = dig_port->aux_ch; | ||
1648 | 1504 | ||
1649 | switch (aux_ch) { | 1505 | switch (aux_ch) { |
1650 | case AUX_CH_A: | 1506 | case AUX_CH_A: |
@@ -1663,7 +1519,8 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) | |||
1663 | static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) | 1519 | static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) |
1664 | { | 1520 | { |
1665 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | 1521 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
1666 | enum aux_ch aux_ch = intel_dp->aux_ch; | 1522 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
1523 | enum aux_ch aux_ch = dig_port->aux_ch; | ||
1667 | 1524 | ||
1668 | switch (aux_ch) { | 1525 | switch (aux_ch) { |
1669 | case AUX_CH_A: | 1526 | case AUX_CH_A: |
@@ -1689,10 +1546,8 @@ static void | |||
1689 | intel_dp_aux_init(struct intel_dp *intel_dp) | 1546 | intel_dp_aux_init(struct intel_dp *intel_dp) |
1690 | { | 1547 | { |
1691 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | 1548 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
1692 | struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; | 1549 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
1693 | 1550 | struct intel_encoder *encoder = &dig_port->base; | |
1694 | intel_dp->aux_ch = intel_aux_ch(intel_dp); | ||
1695 | intel_dp->aux_power_domain = intel_aux_power_domain(intel_dp); | ||
1696 | 1551 | ||
1697 | if (INTEL_GEN(dev_priv) >= 9) { | 1552 | if (INTEL_GEN(dev_priv) >= 9) { |
1698 | intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg; | 1553 | intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg; |
@@ -1951,6 +1806,42 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, | |||
1951 | return false; | 1806 | return false; |
1952 | } | 1807 | } |
1953 | 1808 | ||
1809 | /* Optimize link config in order: max bpp, min lanes, min clock */ | ||
1810 | static bool | ||
1811 | intel_dp_compute_link_config_fast(struct intel_dp *intel_dp, | ||
1812 | struct intel_crtc_state *pipe_config, | ||
1813 | const struct link_config_limits *limits) | ||
1814 | { | ||
1815 | struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; | ||
1816 | int bpp, clock, lane_count; | ||
1817 | int mode_rate, link_clock, link_avail; | ||
1818 | |||
1819 | for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { | ||
1820 | mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, | ||
1821 | bpp); | ||
1822 | |||
1823 | for (lane_count = limits->min_lane_count; | ||
1824 | lane_count <= limits->max_lane_count; | ||
1825 | lane_count <<= 1) { | ||
1826 | for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { | ||
1827 | link_clock = intel_dp->common_rates[clock]; | ||
1828 | link_avail = intel_dp_max_data_rate(link_clock, | ||
1829 | lane_count); | ||
1830 | |||
1831 | if (mode_rate <= link_avail) { | ||
1832 | pipe_config->lane_count = lane_count; | ||
1833 | pipe_config->pipe_bpp = bpp; | ||
1834 | pipe_config->port_clock = link_clock; | ||
1835 | |||
1836 | return true; | ||
1837 | } | ||
1838 | } | ||
1839 | } | ||
1840 | } | ||
1841 | |||
1842 | return false; | ||
1843 | } | ||
1844 | |||
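A worked contrast between the two search orders (numbers mine, assuming intel_dp_max_data_rate() returns link_clock * lane_count in the same kB/s units as intel_dp_link_required()):

/* 1920x1080@60: crtc_clock ~= 148500 kHz, so at 24 bpp
 * mode_rate = DIV_ROUND_UP(148500 * 24, 8) = 445500 kB/s.
 * With common rates {162000, 270000, 540000} kHz and 1/2/4 lanes:
 * fast-and-narrow tries 1x162000, 1x270000, then 1x540000 = 540000
 * >= 445500 and picks 1 lane at HBR2; slow-and-wide (clock outer,
 * lanes inner) would instead settle on 4x162000 = 648000, i.e.
 * 4 lanes at RBR. */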
1954 | static bool | 1845 | static bool |
1955 | intel_dp_compute_link_config(struct intel_encoder *encoder, | 1846 | intel_dp_compute_link_config(struct intel_encoder *encoder, |
1956 | struct intel_crtc_state *pipe_config) | 1847 | struct intel_crtc_state *pipe_config) |
@@ -1975,13 +1866,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, | |||
1975 | limits.min_bpp = 6 * 3; | 1866 | limits.min_bpp = 6 * 3; |
1976 | limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); | 1867 | limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config); |
1977 | 1868 | ||
1978 | if (intel_dp_is_edp(intel_dp)) { | 1869 | if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) { |
1979 | /* | 1870 | /* |
1980 | * Use the maximum clock and number of lanes the eDP panel | 1871 | * Use the maximum clock and number of lanes the eDP panel |
1981 | * advertises being capable of. The panels are generally | 1872 | * advertises being capable of. The eDP 1.3 and earlier panels |
1982 | * designed to support only a single clock and lane | 1873 | * are generally designed to support only a single clock and |
1983 | * configuration, and typically these values correspond to the | 1874 | * lane configuration, and typically these values correspond to |
1984 | * native resolution of the panel. | 1875 | * the native resolution of the panel. With eDP 1.4 rate select |
1876 | * and DSC, this is decreasingly the case, and we need to be | ||
1877 | * able to select less than maximum link config. | ||
1985 | */ | 1878 | */ |
1986 | limits.min_lane_count = limits.max_lane_count; | 1879 | limits.min_lane_count = limits.max_lane_count; |
1987 | limits.min_clock = limits.max_clock; | 1880 | limits.min_clock = limits.max_clock; |
@@ -1995,12 +1888,25 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, | |||
1995 | intel_dp->common_rates[limits.max_clock], | 1888 | intel_dp->common_rates[limits.max_clock], |
1996 | limits.max_bpp, adjusted_mode->crtc_clock); | 1889 | limits.max_bpp, adjusted_mode->crtc_clock); |
1997 | 1890 | ||
1998 | /* | 1891 | if (intel_dp_is_edp(intel_dp)) { |
1999 | * Optimize for slow and wide. This is the place to add alternative | 1892 | /* |
2000 | * optimization policy. | 1893 | * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4 |
2001 | */ | 1894 | * section A.1: "It is recommended that the minimum number of |
2002 | if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits)) | 1895 | * lanes be used, using the minimum link rate allowed for that |
2003 | return false; | 1896 | * lane configuration." |
1897 | * | ||
1898 | * Note that we use the max clock and lane count for eDP 1.3 and | ||
1899 | * earlier, and fast vs. wide is irrelevant. | ||
1900 | */ | ||
1901 | if (!intel_dp_compute_link_config_fast(intel_dp, pipe_config, | ||
1902 | &limits)) | ||
1903 | return false; | ||
1904 | } else { | ||
1905 | /* Optimize for slow and wide. */ | ||
1906 | if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config, | ||
1907 | &limits)) | ||
1908 | return false; | ||
1909 | } | ||
2004 | 1910 | ||
2005 | DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n", | 1911 | DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n", |
2006 | pipe_config->lane_count, pipe_config->port_clock, | 1912 | pipe_config->lane_count, pipe_config->port_clock, |
@@ -2023,6 +1929,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
2023 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 1929 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
2024 | struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; | 1930 | struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; |
2025 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | 1931 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); |
1932 | struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base); | ||
2026 | enum port port = encoder->port; | 1933 | enum port port = encoder->port; |
2027 | struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); | 1934 | struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); |
2028 | struct intel_connector *intel_connector = intel_dp->attached_connector; | 1935 | struct intel_connector *intel_connector = intel_dp->attached_connector; |
@@ -2034,6 +1941,10 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
2034 | if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) | 1941 | if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) |
2035 | pipe_config->has_pch_encoder = true; | 1942 | pipe_config->has_pch_encoder = true; |
2036 | 1943 | ||
1944 | pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; | ||
1945 | if (lspcon->active) | ||
1946 | lspcon_ycbcr420_config(&intel_connector->base, pipe_config); | ||
1947 | |||
2037 | pipe_config->has_drrs = false; | 1948 | pipe_config->has_drrs = false; |
2038 | if (IS_G4X(dev_priv) || port == PORT_A) | 1949 | if (IS_G4X(dev_priv) || port == PORT_A) |
2039 | pipe_config->has_audio = false; | 1950 | pipe_config->has_audio = false; |
@@ -2338,7 +2249,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) | |||
2338 | if (edp_have_panel_vdd(intel_dp)) | 2249 | if (edp_have_panel_vdd(intel_dp)) |
2339 | return need_to_disable; | 2250 | return need_to_disable; |
2340 | 2251 | ||
2341 | intel_display_power_get(dev_priv, intel_dp->aux_power_domain); | 2252 | intel_display_power_get(dev_priv, |
2253 | intel_aux_power_domain(intel_dig_port)); | ||
2342 | 2254 | ||
2343 | DRM_DEBUG_KMS("Turning eDP port %c VDD on\n", | 2255 | DRM_DEBUG_KMS("Turning eDP port %c VDD on\n", |
2344 | port_name(intel_dig_port->base.port)); | 2256 | port_name(intel_dig_port->base.port)); |
@@ -2424,7 +2336,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) | |||
2424 | if ((pp & PANEL_POWER_ON) == 0) | 2336 | if ((pp & PANEL_POWER_ON) == 0) |
2425 | intel_dp->panel_power_off_time = ktime_get_boottime(); | 2337 | intel_dp->panel_power_off_time = ktime_get_boottime(); |
2426 | 2338 | ||
2427 | intel_display_power_put(dev_priv, intel_dp->aux_power_domain); | 2339 | intel_display_power_put(dev_priv, |
2340 | intel_aux_power_domain(intel_dig_port)); | ||
2428 | } | 2341 | } |
2429 | 2342 | ||
2430 | static void edp_panel_vdd_work(struct work_struct *__work) | 2343 | static void edp_panel_vdd_work(struct work_struct *__work) |
@@ -2537,6 +2450,7 @@ void intel_edp_panel_on(struct intel_dp *intel_dp) | |||
2537 | static void edp_panel_off(struct intel_dp *intel_dp) | 2450 | static void edp_panel_off(struct intel_dp *intel_dp) |
2538 | { | 2451 | { |
2539 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | 2452 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
2453 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | ||
2540 | u32 pp; | 2454 | u32 pp; |
2541 | i915_reg_t pp_ctrl_reg; | 2455 | i915_reg_t pp_ctrl_reg; |
2542 | 2456 | ||
@@ -2546,10 +2460,10 @@ static void edp_panel_off(struct intel_dp *intel_dp) | |||
2546 | return; | 2460 | return; |
2547 | 2461 | ||
2548 | DRM_DEBUG_KMS("Turn eDP port %c panel power off\n", | 2462 | DRM_DEBUG_KMS("Turn eDP port %c panel power off\n", |
2549 | port_name(dp_to_dig_port(intel_dp)->base.port)); | 2463 | port_name(dig_port->base.port)); |
2550 | 2464 | ||
2551 | WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n", | 2465 | WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n", |
2552 | port_name(dp_to_dig_port(intel_dp)->base.port)); | 2466 | port_name(dig_port->base.port)); |
2553 | 2467 | ||
2554 | pp = ironlake_get_pp_control(intel_dp); | 2468 | pp = ironlake_get_pp_control(intel_dp); |
2555 | /* We need to switch off panel power _and_ force vdd, for otherwise some | 2469 | /* We need to switch off panel power _and_ force vdd, for otherwise some |
@@ -2568,7 +2482,7 @@ static void edp_panel_off(struct intel_dp *intel_dp) | |||
2568 | intel_dp->panel_power_off_time = ktime_get_boottime(); | 2482 | intel_dp->panel_power_off_time = ktime_get_boottime(); |
2569 | 2483 | ||
2570 | /* We got a reference when we enabled the VDD. */ | 2484 | /* We got a reference when we enabled the VDD. */ |
2571 | intel_display_power_put(dev_priv, intel_dp->aux_power_domain); | 2485 | intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port)); |
2572 | } | 2486 | } |
2573 | 2487 | ||
2574 | void intel_edp_panel_off(struct intel_dp *intel_dp) | 2488 | void intel_edp_panel_off(struct intel_dp *intel_dp) |
@@ -3900,6 +3814,41 @@ intel_dp_read_dpcd(struct intel_dp *intel_dp) | |||
3900 | return intel_dp->dpcd[DP_DPCD_REV] != 0; | 3814 | return intel_dp->dpcd[DP_DPCD_REV] != 0; |
3901 | } | 3815 | } |
3902 | 3816 | ||
3817 | static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp) | ||
3818 | { | ||
3819 | /* | ||
3820 | * Clear the cached register set to avoid using stale values | ||
3821 | * for the sinks that do not support DSC. | ||
3822 | */ | ||
3823 | memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); | ||
3824 | |||
3825 | /* Clear fec_capable to avoid using stale values */ | ||
3826 | intel_dp->fec_capable = 0; | ||
3827 | |||
3828 | /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */ | ||
3829 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 || | ||
3830 | intel_dp->edp_dpcd[0] >= DP_EDP_14) { | ||
3831 | if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, | ||
3832 | intel_dp->dsc_dpcd, | ||
3833 | sizeof(intel_dp->dsc_dpcd)) < 0) | ||
3834 | DRM_ERROR("Failed to read DPCD register 0x%x\n", | ||
3835 | DP_DSC_SUPPORT); | ||
3836 | |||
3837 | DRM_DEBUG_KMS("DSC DPCD: %*ph\n", | ||
3838 | (int)sizeof(intel_dp->dsc_dpcd), | ||
3839 | intel_dp->dsc_dpcd); | ||
3840 | /* FEC is supported only on DP 1.4 */ | ||
3841 | if (!intel_dp_is_edp(intel_dp)) { | ||
3842 | if (drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, | ||
3843 | &intel_dp->fec_capable) < 0) | ||
3844 | DRM_ERROR("Failed to read FEC DPCD register\n"); | ||
3845 | |||
3846 | DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", | ||
3847 | intel_dp->fec_capable); | ||
3848 | } | ||
3849 | } | ||
3850 | } | ||
3851 | |||
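With the DSC and FEC capabilities now cached, later code can gate compression without extra AUX reads. A minimal sketch of such a check, assuming the DSC/FEC helpers added to drm_dp_helper.h around this time (drm_dp_sink_supports_dsc() and drm_dp_sink_supports_fec()) are present in this tree:

    /* Sketch only: gate DSC on the cached sink caps; assumes the
     * drm_dp_helper.h DSC/FEC helpers exist in this revision. */
    static bool example_sink_allows_dsc(struct intel_dp *intel_dp)
    {
            /* eDP needs no FEC; external DP 1.4 links need FEC for DSC */
            return drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd) &&
                   (intel_dp_is_edp(intel_dp) ||
                    drm_dp_sink_supports_fec(intel_dp->fec_capable));
    }
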
3903 | static bool | 3852 | static bool |
3904 | intel_edp_init_dpcd(struct intel_dp *intel_dp) | 3853 | intel_edp_init_dpcd(struct intel_dp *intel_dp) |
3905 | { | 3854 | { |
@@ -3976,6 +3925,10 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) | |||
3976 | 3925 | ||
3977 | intel_dp_set_common_rates(intel_dp); | 3926 | intel_dp_set_common_rates(intel_dp); |
3978 | 3927 | ||
3928 | /* Read the eDP DSC DPCD registers */ | ||
3929 | if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) | ||
3930 | intel_dp_get_dsc_sink_cap(intel_dp); | ||
3931 | |||
3979 | return true; | 3932 | return true; |
3980 | } | 3933 | } |
3981 | 3934 | ||
@@ -4029,16 +3982,10 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) | |||
4029 | } | 3982 | } |
4030 | 3983 | ||
4031 | static bool | 3984 | static bool |
4032 | intel_dp_can_mst(struct intel_dp *intel_dp) | 3985 | intel_dp_sink_can_mst(struct intel_dp *intel_dp) |
4033 | { | 3986 | { |
4034 | u8 mstm_cap; | 3987 | u8 mstm_cap; |
4035 | 3988 | ||
4036 | if (!i915_modparams.enable_dp_mst) | ||
4037 | return false; | ||
4038 | |||
4039 | if (!intel_dp->can_mst) | ||
4040 | return false; | ||
4041 | |||
4042 | if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) | 3989 | if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) |
4043 | return false; | 3990 | return false; |
4044 | 3991 | ||
@@ -4048,34 +3995,36 @@ intel_dp_can_mst(struct intel_dp *intel_dp) | |||
4048 | return mstm_cap & DP_MST_CAP; | 3995 | return mstm_cap & DP_MST_CAP; |
4049 | } | 3996 | } |
4050 | 3997 | ||
3998 | static bool | ||
3999 | intel_dp_can_mst(struct intel_dp *intel_dp) | ||
4000 | { | ||
4001 | return i915_modparams.enable_dp_mst && | ||
4002 | intel_dp->can_mst && | ||
4003 | intel_dp_sink_can_mst(intel_dp); | ||
4004 | } | ||
4005 | |||
4051 | static void | 4006 | static void |
4052 | intel_dp_configure_mst(struct intel_dp *intel_dp) | 4007 | intel_dp_configure_mst(struct intel_dp *intel_dp) |
4053 | { | 4008 | { |
4054 | if (!i915_modparams.enable_dp_mst) | 4009 | struct intel_encoder *encoder = |
4055 | return; | 4010 | &dp_to_dig_port(intel_dp)->base; |
4011 | bool sink_can_mst = intel_dp_sink_can_mst(intel_dp); | ||
4012 | |||
4013 | DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n", | ||
4014 | port_name(encoder->port), yesno(intel_dp->can_mst), | ||
4015 | yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst)); | ||
4056 | 4016 | ||
4057 | if (!intel_dp->can_mst) | 4017 | if (!intel_dp->can_mst) |
4058 | return; | 4018 | return; |
4059 | 4019 | ||
4060 | intel_dp->is_mst = intel_dp_can_mst(intel_dp); | 4020 | intel_dp->is_mst = sink_can_mst && |
4061 | 4021 | i915_modparams.enable_dp_mst; | |
4062 | if (intel_dp->is_mst) | ||
4063 | DRM_DEBUG_KMS("Sink is MST capable\n"); | ||
4064 | else | ||
4065 | DRM_DEBUG_KMS("Sink is not MST capable\n"); | ||
4066 | 4022 | ||
4067 | drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, | 4023 | drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, |
4068 | intel_dp->is_mst); | 4024 | intel_dp->is_mst); |
4069 | } | 4025 | } |
4070 | 4026 | ||
4071 | static bool | 4027 | static bool |
4072 | intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) | ||
4073 | { | ||
4074 | return drm_dp_dpcd_readb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, | ||
4075 | sink_irq_vector) == 1; | ||
4076 | } | ||
4077 | |||
4078 | static bool | ||
4079 | intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) | 4028 | intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) |
4080 | { | 4029 | { |
4081 | return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, | 4030 | return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, |
@@ -4083,6 +4032,91 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) | |||
4083 | DP_DPRX_ESI_LEN; | 4032 | DP_DPRX_ESI_LEN; |
4084 | } | 4033 | } |
4085 | 4034 | ||
4035 | u16 intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count, | ||
4036 | int mode_clock, int mode_hdisplay) | ||
4037 | { | ||
4038 | u16 bits_per_pixel, max_bpp_small_joiner_ram; | ||
4039 | int i; | ||
4040 | |||
4041 | /* | ||
4042 | * Available Link Bandwidth (Kbits/sec) = NumberOfLanes * | ||
4043 | * LinkSymbolClock * 8 * ((100 - FECOverhead) / 100) * TimeSlotsPerMTP | ||
4044 | * FECOverhead = 2.4%; for SST, TimeSlotsPerMTP is 1; | ||
4045 | * for MST, TimeSlotsPerMTP has to be calculated | ||
4046 | */ | ||
4047 | bits_per_pixel = (link_clock * lane_count * 8 * | ||
4048 | DP_DSC_FEC_OVERHEAD_FACTOR) / | ||
4049 | mode_clock; | ||
4050 | |||
4051 | /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */ | ||
4052 | max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / | ||
4053 | mode_hdisplay; | ||
4054 | |||
4055 | /* | ||
4056 | * Greatest allowed DSC BPP = MIN (output BPP from available Link BW | ||
4057 | * check, output bpp from small joiner RAM check) | ||
4058 | */ | ||
4059 | bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram); | ||
4060 | |||
4061 | /* Error out if the max bpp is less than smallest allowed valid bpp */ | ||
4062 | if (bits_per_pixel < valid_dsc_bpp[0]) { | ||
4063 | DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel); | ||
4064 | return 0; | ||
4065 | } | ||
4066 | |||
4067 | /* Round down to the nearest valid bpp in the VESA DSC table */ | ||
4068 | for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) { | ||
4069 | if (bits_per_pixel < valid_dsc_bpp[i + 1]) | ||
4070 | break; | ||
4071 | } | ||
4072 | bits_per_pixel = valid_dsc_bpp[i]; | ||
4073 | |||
4074 | /* | ||
4075 | * Compressed BPP is returned in U6.4 format, so multiply by 16; for | ||
4076 | * Gen 11 the fractional part is 0 | ||
4077 | */ | ||
4078 | return bits_per_pixel << 4; | ||
4079 | } | ||
4080 | |||
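To make the two clamps above concrete, here is a standalone worked example with representative numbers. The 976/1000 FEC scaling and the 61440-bit small-joiner line buffer are illustrative assumptions standing in for DP_DSC_FEC_OVERHEAD_FACTOR and DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER, not values quoted from the patch:

    #include <stdio.h>

    /* Sketch: max DSC output bpp = min(link-bandwidth check, small-joiner
     * RAM check), mirroring intel_dp_dsc_get_output_bpp() above. */
    static unsigned int example_max_dsc_bpp(unsigned long long link_clock_khz,
                                            unsigned int lane_count,
                                            unsigned int mode_clock_khz,
                                            unsigned int mode_hdisplay)
    {
            /* ~2.4% FEC overhead folded in as 976/1000 (assumed scaling) */
            unsigned int link_bpp = link_clock_khz * lane_count * 8 * 976 /
                                    1000 / mode_clock_khz;
            /* assumed 61440-bit joiner RAM: bits per horizontal pixel */
            unsigned int ram_bpp = 61440 / mode_hdisplay;

            return link_bpp < ram_bpp ? link_bpp : ram_bpp;
    }

    int main(void)
    {
            /* 4-lane HBR2 (540000 kHz symbol clock) driving 3840x2160@60:
             * link check ~31 bpp, joiner check 16 bpp, so 16 bpp wins;
             * the driver would then snap to valid_dsc_bpp[] and return
             * the value in U6.4, i.e. 16 << 4 = 256. */
            printf("%u bpp\n", example_max_dsc_bpp(540000, 4, 533250, 3840));
            return 0;
    }
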
4081 | u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, | ||
4082 | int mode_clock, | ||
4083 | int mode_hdisplay) | ||
4084 | { | ||
4085 | u8 min_slice_count, i; | ||
4086 | int max_slice_width; | ||
4087 | |||
4088 | if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE) | ||
4089 | min_slice_count = DIV_ROUND_UP(mode_clock, | ||
4090 | DP_DSC_MAX_ENC_THROUGHPUT_0); | ||
4091 | else | ||
4092 | min_slice_count = DIV_ROUND_UP(mode_clock, | ||
4093 | DP_DSC_MAX_ENC_THROUGHPUT_1); | ||
4094 | |||
4095 | max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd); | ||
4096 | if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) { | ||
4097 | DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n", | ||
4098 | max_slice_width); | ||
4099 | return 0; | ||
4100 | } | ||
4101 | /* Also take into account max slice width */ | ||
4102 | min_slice_count = min_t(uint8_t, min_slice_count, | ||
4103 | DIV_ROUND_UP(mode_hdisplay, | ||
4104 | max_slice_width)); | ||
4105 | |||
4106 | /* Pick the smallest sink-supported slice count that is sufficient */ | ||
4107 | for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) { | ||
4108 | if (valid_dsc_slicecount[i] > | ||
4109 | drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, | ||
4110 | false)) | ||
4111 | break; | ||
4112 | if (min_slice_count <= valid_dsc_slicecount[i]) | ||
4113 | return valid_dsc_slicecount[i]; | ||
4114 | } | ||
4115 | |||
4116 | DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count); | ||
4117 | return 0; | ||
4118 | } | ||
4119 | |||
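The slice-count selection can be sanity-checked the same way. In this sketch the 340000 kHz per-slice throughput and the {1, 2, 4} table are placeholders for DP_DSC_MAX_ENC_THROUGHPUT_0 and valid_dsc_slicecount[], and the sink's max slice width and max slice count are passed in:

    /* Sketch mirroring intel_dp_dsc_get_slice_count(): derive a slice
     * count from the pixel rate, apply the same min_t()-style clamp
     * against the width-derived count, then pick the smallest entry of
     * an assumed valid table that the sink supports. */
    static unsigned int example_slice_count(unsigned int mode_clock_khz,
                                            unsigned int mode_hdisplay,
                                            unsigned int max_slice_width,
                                            unsigned int sink_max_slices)
    {
            static const unsigned int valid[] = { 1, 2, 4 }; /* assumed */
            unsigned int need, by_width, i;

            /* each slice encoder handles at most ~340000 kHz (assumed) */
            need = (mode_clock_khz + 340000 - 1) / 340000;
            by_width = (mode_hdisplay + max_slice_width - 1) / max_slice_width;
            if (by_width < need)
                    need = by_width;

            for (i = 0; i < sizeof(valid) / sizeof(valid[0]); i++) {
                    if (valid[i] > sink_max_slices)
                            break;
                    if (need <= valid[i])
                            return valid[i];
            }
            return 0; /* no supported slice count covers this mode */
    }

For the 533250 kHz, 3840-wide mode above with a 1920-pixel max slice width and a 4-slice sink, both limits work out to 2, so the sketch returns 2 slices.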
4086 | static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp) | 4120 | static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp) |
4087 | { | 4121 | { |
4088 | int status = 0; | 4122 | int status = 0; |
@@ -4403,7 +4437,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, | |||
4403 | 4437 | ||
4404 | /* Suppress underruns caused by re-training */ | 4438 | /* Suppress underruns caused by re-training */ |
4405 | intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); | 4439 | intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); |
4406 | if (crtc->config->has_pch_encoder) | 4440 | if (crtc_state->has_pch_encoder) |
4407 | intel_set_pch_fifo_underrun_reporting(dev_priv, | 4441 | intel_set_pch_fifo_underrun_reporting(dev_priv, |
4408 | intel_crtc_pch_transcoder(crtc), false); | 4442 | intel_crtc_pch_transcoder(crtc), false); |
4409 | 4443 | ||
@@ -4414,7 +4448,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, | |||
4414 | intel_wait_for_vblank(dev_priv, crtc->pipe); | 4448 | intel_wait_for_vblank(dev_priv, crtc->pipe); |
4415 | 4449 | ||
4416 | intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); | 4450 | intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); |
4417 | if (crtc->config->has_pch_encoder) | 4451 | if (crtc_state->has_pch_encoder) |
4418 | intel_set_pch_fifo_underrun_reporting(dev_priv, | 4452 | intel_set_pch_fifo_underrun_reporting(dev_priv, |
4419 | intel_crtc_pch_transcoder(crtc), true); | 4453 | intel_crtc_pch_transcoder(crtc), true); |
4420 | 4454 | ||
@@ -4462,6 +4496,29 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder, | |||
4462 | return changed; | 4496 | return changed; |
4463 | } | 4497 | } |
4464 | 4498 | ||
4499 | static void intel_dp_check_service_irq(struct intel_dp *intel_dp) | ||
4500 | { | ||
4501 | u8 val; | ||
4502 | |||
4503 | if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) | ||
4504 | return; | ||
4505 | |||
4506 | if (drm_dp_dpcd_readb(&intel_dp->aux, | ||
4507 | DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val) | ||
4508 | return; | ||
4509 | |||
4510 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val); | ||
4511 | |||
4512 | if (val & DP_AUTOMATED_TEST_REQUEST) | ||
4513 | intel_dp_handle_test_request(intel_dp); | ||
4514 | |||
4515 | if (val & DP_CP_IRQ) | ||
4516 | intel_hdcp_check_link(intel_dp->attached_connector); | ||
4517 | |||
4518 | if (val & DP_SINK_SPECIFIC_IRQ) | ||
4519 | DRM_DEBUG_DRIVER("Sink specific irq unhandled\n"); | ||
4520 | } | ||
4521 | |||
4465 | /* | 4522 | /* |
4466 | * According to DP spec | 4523 | * According to DP spec |
4467 | * 5.1.2: | 4524 | * 5.1.2: |
@@ -4479,7 +4536,6 @@ static bool | |||
4479 | intel_dp_short_pulse(struct intel_dp *intel_dp) | 4536 | intel_dp_short_pulse(struct intel_dp *intel_dp) |
4480 | { | 4537 | { |
4481 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | 4538 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
4482 | u8 sink_irq_vector = 0; | ||
4483 | u8 old_sink_count = intel_dp->sink_count; | 4539 | u8 old_sink_count = intel_dp->sink_count; |
4484 | bool ret; | 4540 | bool ret; |
4485 | 4541 | ||
@@ -4502,20 +4558,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) | |||
4502 | return false; | 4558 | return false; |
4503 | } | 4559 | } |
4504 | 4560 | ||
4505 | /* Try to read the source of the interrupt */ | 4561 | intel_dp_check_service_irq(intel_dp); |
4506 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && | ||
4507 | intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) && | ||
4508 | sink_irq_vector != 0) { | ||
4509 | /* Clear interrupt source */ | ||
4510 | drm_dp_dpcd_writeb(&intel_dp->aux, | ||
4511 | DP_DEVICE_SERVICE_IRQ_VECTOR, | ||
4512 | sink_irq_vector); | ||
4513 | |||
4514 | if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) | ||
4515 | intel_dp_handle_test_request(intel_dp); | ||
4516 | if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) | ||
4517 | DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); | ||
4518 | } | ||
4519 | 4562 | ||
4520 | /* Handle CEC interrupts, if any */ | 4563 | /* Handle CEC interrupts, if any */ |
4521 | drm_dp_cec_irq(&intel_dp->aux); | 4564 | drm_dp_cec_irq(&intel_dp->aux); |
@@ -4810,6 +4853,9 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv, | |||
4810 | type_str); | 4853 | type_str); |
4811 | } | 4854 | } |
4812 | 4855 | ||
4856 | static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv, | ||
4857 | struct intel_digital_port *dig_port); | ||
4858 | |||
4813 | /* | 4859 | /* |
4814 | * This function implements the first part of the Connect Flow described by our | 4860 | * This function implements the first part of the Connect Flow described by our |
4815 | * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading | 4861 | * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading |
@@ -4864,9 +4910,7 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv, | |||
4864 | if (dig_port->tc_type == TC_PORT_TYPEC && | 4910 | if (dig_port->tc_type == TC_PORT_TYPEC && |
4865 | !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) { | 4911 | !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) { |
4866 | DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port); | 4912 | DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port); |
4867 | val = I915_READ(PORT_TX_DFLEXDPCSSS); | 4913 | icl_tc_phy_disconnect(dev_priv, dig_port); |
4868 | val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); | ||
4869 | I915_WRITE(PORT_TX_DFLEXDPCSSS, val); | ||
4870 | return false; | 4914 | return false; |
4871 | } | 4915 | } |
4872 | 4916 | ||
@@ -4881,21 +4925,24 @@ static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv, | |||
4881 | struct intel_digital_port *dig_port) | 4925 | struct intel_digital_port *dig_port) |
4882 | { | 4926 | { |
4883 | enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); | 4927 | enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); |
4884 | u32 val; | ||
4885 | 4928 | ||
4886 | if (dig_port->tc_type != TC_PORT_LEGACY && | 4929 | if (dig_port->tc_type == TC_PORT_UNKNOWN) |
4887 | dig_port->tc_type != TC_PORT_TYPEC) | ||
4888 | return; | 4930 | return; |
4889 | 4931 | ||
4890 | /* | 4932 | /* |
4891 | * This function may be called many times in a row without an HPD event | 4933 | * For TBT the disconnect flow only needs the live status read, which |
4892 | * in between, so try to avoid the write when we can. | 4934 | * the caller has already done. |
4893 | */ | 4935 | */ |
4894 | val = I915_READ(PORT_TX_DFLEXDPCSSS); | 4936 | if (dig_port->tc_type == TC_PORT_TYPEC || |
4895 | if (val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)) { | 4937 | dig_port->tc_type == TC_PORT_LEGACY) { |
4938 | u32 val; | ||
4939 | |||
4940 | val = I915_READ(PORT_TX_DFLEXDPCSSS); | ||
4896 | val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); | 4941 | val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); |
4897 | I915_WRITE(PORT_TX_DFLEXDPCSSS, val); | 4942 | I915_WRITE(PORT_TX_DFLEXDPCSSS, val); |
4898 | } | 4943 | } |
4944 | |||
4945 | dig_port->tc_type = TC_PORT_UNKNOWN; | ||
4899 | } | 4946 | } |
4900 | 4947 | ||
4901 | /* | 4948 | /* |
@@ -4945,19 +4992,14 @@ static bool icl_digital_port_connected(struct intel_encoder *encoder) | |||
4945 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 4992 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
4946 | struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); | 4993 | struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); |
4947 | 4994 | ||
4948 | switch (encoder->hpd_pin) { | 4995 | if (intel_port_is_combophy(dev_priv, encoder->port)) |
4949 | case HPD_PORT_A: | ||
4950 | case HPD_PORT_B: | ||
4951 | return icl_combo_port_connected(dev_priv, dig_port); | 4996 | return icl_combo_port_connected(dev_priv, dig_port); |
4952 | case HPD_PORT_C: | 4997 | else if (intel_port_is_tc(dev_priv, encoder->port)) |
4953 | case HPD_PORT_D: | ||
4954 | case HPD_PORT_E: | ||
4955 | case HPD_PORT_F: | ||
4956 | return icl_tc_port_connected(dev_priv, dig_port); | 4998 | return icl_tc_port_connected(dev_priv, dig_port); |
4957 | default: | 4999 | else |
4958 | MISSING_CASE(encoder->hpd_pin); | 5000 | MISSING_CASE(encoder->hpd_pin); |
4959 | return false; | 5001 | |
4960 | } | 5002 | return false; |
4961 | } | 5003 | } |
4962 | 5004 | ||
4963 | /* | 5005 | /* |
@@ -4982,20 +5024,23 @@ bool intel_digital_port_connected(struct intel_encoder *encoder) | |||
4982 | return g4x_digital_port_connected(encoder); | 5024 | return g4x_digital_port_connected(encoder); |
4983 | } | 5025 | } |
4984 | 5026 | ||
4985 | if (IS_GEN5(dev_priv)) | 5027 | if (INTEL_GEN(dev_priv) >= 11) |
4986 | return ilk_digital_port_connected(encoder); | 5028 | return icl_digital_port_connected(encoder); |
4987 | else if (IS_GEN6(dev_priv)) | 5029 | else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) |
4988 | return snb_digital_port_connected(encoder); | 5030 | return spt_digital_port_connected(encoder); |
4989 | else if (IS_GEN7(dev_priv)) | ||
4990 | return ivb_digital_port_connected(encoder); | ||
4991 | else if (IS_GEN8(dev_priv)) | ||
4992 | return bdw_digital_port_connected(encoder); | ||
4993 | else if (IS_GEN9_LP(dev_priv)) | 5031 | else if (IS_GEN9_LP(dev_priv)) |
4994 | return bxt_digital_port_connected(encoder); | 5032 | return bxt_digital_port_connected(encoder); |
4995 | else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv)) | 5033 | else if (IS_GEN8(dev_priv)) |
4996 | return spt_digital_port_connected(encoder); | 5034 | return bdw_digital_port_connected(encoder); |
4997 | else | 5035 | else if (IS_GEN7(dev_priv)) |
4998 | return icl_digital_port_connected(encoder); | 5036 | return ivb_digital_port_connected(encoder); |
5037 | else if (IS_GEN6(dev_priv)) | ||
5038 | return snb_digital_port_connected(encoder); | ||
5039 | else if (IS_GEN5(dev_priv)) | ||
5040 | return ilk_digital_port_connected(encoder); | ||
5041 | |||
5042 | MISSING_CASE(INTEL_GEN(dev_priv)); | ||
5043 | return false; | ||
4999 | } | 5044 | } |
5000 | 5045 | ||
5001 | static struct edid * | 5046 | static struct edid * |
@@ -5042,28 +5087,35 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) | |||
5042 | } | 5087 | } |
5043 | 5088 | ||
5044 | static int | 5089 | static int |
5045 | intel_dp_long_pulse(struct intel_connector *connector, | 5090 | intel_dp_detect(struct drm_connector *connector, |
5046 | struct drm_modeset_acquire_ctx *ctx) | 5091 | struct drm_modeset_acquire_ctx *ctx, |
5092 | bool force) | ||
5047 | { | 5093 | { |
5048 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | 5094 | struct drm_i915_private *dev_priv = to_i915(connector->dev); |
5049 | struct intel_dp *intel_dp = intel_attached_dp(&connector->base); | 5095 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
5096 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | ||
5097 | struct intel_encoder *encoder = &dig_port->base; | ||
5050 | enum drm_connector_status status; | 5098 | enum drm_connector_status status; |
5051 | u8 sink_irq_vector = 0; | 5099 | enum intel_display_power_domain aux_domain = |
5100 | intel_aux_power_domain(dig_port); | ||
5052 | 5101 | ||
5102 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", | ||
5103 | connector->base.id, connector->name); | ||
5053 | WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); | 5104 | WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); |
5054 | 5105 | ||
5055 | intel_display_power_get(dev_priv, intel_dp->aux_power_domain); | 5106 | intel_display_power_get(dev_priv, aux_domain); |
5056 | 5107 | ||
5057 | /* Can't disconnect eDP */ | 5108 | /* Can't disconnect eDP */ |
5058 | if (intel_dp_is_edp(intel_dp)) | 5109 | if (intel_dp_is_edp(intel_dp)) |
5059 | status = edp_detect(intel_dp); | 5110 | status = edp_detect(intel_dp); |
5060 | else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) | 5111 | else if (intel_digital_port_connected(encoder)) |
5061 | status = intel_dp_detect_dpcd(intel_dp); | 5112 | status = intel_dp_detect_dpcd(intel_dp); |
5062 | else | 5113 | else |
5063 | status = connector_status_disconnected; | 5114 | status = connector_status_disconnected; |
5064 | 5115 | ||
5065 | if (status == connector_status_disconnected) { | 5116 | if (status == connector_status_disconnected) { |
5066 | memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); | 5117 | memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); |
5118 | memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); | ||
5067 | 5119 | ||
5068 | if (intel_dp->is_mst) { | 5120 | if (intel_dp->is_mst) { |
5069 | DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", | 5121 | DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", |
@@ -5089,6 +5141,10 @@ intel_dp_long_pulse(struct intel_connector *connector, | |||
5089 | 5141 | ||
5090 | intel_dp_print_rates(intel_dp); | 5142 | intel_dp_print_rates(intel_dp); |
5091 | 5143 | ||
5144 | /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ | ||
5145 | if (INTEL_GEN(dev_priv) >= 11) | ||
5146 | intel_dp_get_dsc_sink_cap(intel_dp); | ||
5147 | |||
5092 | drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, | 5148 | drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, |
5093 | drm_dp_is_branch(intel_dp->dpcd)); | 5149 | drm_dp_is_branch(intel_dp->dpcd)); |
5094 | 5150 | ||
@@ -5109,9 +5165,13 @@ intel_dp_long_pulse(struct intel_connector *connector, | |||
5109 | * with an IRQ_HPD, so force a link status check. | 5165 | * with an IRQ_HPD, so force a link status check. |
5110 | */ | 5166 | */ |
5111 | if (!intel_dp_is_edp(intel_dp)) { | 5167 | if (!intel_dp_is_edp(intel_dp)) { |
5112 | struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; | 5168 | int ret; |
5113 | 5169 | ||
5114 | intel_dp_retrain_link(encoder, ctx); | 5170 | ret = intel_dp_retrain_link(encoder, ctx); |
5171 | if (ret) { | ||
5172 | intel_display_power_put(dev_priv, aux_domain); | ||
5173 | return ret; | ||
5174 | } | ||
5115 | } | 5175 | } |
5116 | 5176 | ||
5117 | /* | 5177 | /* |
@@ -5123,61 +5183,17 @@ intel_dp_long_pulse(struct intel_connector *connector, | |||
5123 | intel_dp->aux.i2c_defer_count = 0; | 5183 | intel_dp->aux.i2c_defer_count = 0; |
5124 | 5184 | ||
5125 | intel_dp_set_edid(intel_dp); | 5185 | intel_dp_set_edid(intel_dp); |
5126 | if (intel_dp_is_edp(intel_dp) || connector->detect_edid) | 5186 | if (intel_dp_is_edp(intel_dp) || |
5187 | to_intel_connector(connector)->detect_edid) | ||
5127 | status = connector_status_connected; | 5188 | status = connector_status_connected; |
5128 | intel_dp->detect_done = true; | ||
5129 | 5189 | ||
5130 | /* Try to read the source of the interrupt */ | 5190 | intel_dp_check_service_irq(intel_dp); |
5131 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && | ||
5132 | intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) && | ||
5133 | sink_irq_vector != 0) { | ||
5134 | /* Clear interrupt source */ | ||
5135 | drm_dp_dpcd_writeb(&intel_dp->aux, | ||
5136 | DP_DEVICE_SERVICE_IRQ_VECTOR, | ||
5137 | sink_irq_vector); | ||
5138 | |||
5139 | if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) | ||
5140 | intel_dp_handle_test_request(intel_dp); | ||
5141 | if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) | ||
5142 | DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); | ||
5143 | } | ||
5144 | 5191 | ||
5145 | out: | 5192 | out: |
5146 | if (status != connector_status_connected && !intel_dp->is_mst) | 5193 | if (status != connector_status_connected && !intel_dp->is_mst) |
5147 | intel_dp_unset_edid(intel_dp); | 5194 | intel_dp_unset_edid(intel_dp); |
5148 | 5195 | ||
5149 | intel_display_power_put(dev_priv, intel_dp->aux_power_domain); | 5196 | intel_display_power_put(dev_priv, aux_domain); |
5150 | return status; | ||
5151 | } | ||
5152 | |||
5153 | static int | ||
5154 | intel_dp_detect(struct drm_connector *connector, | ||
5155 | struct drm_modeset_acquire_ctx *ctx, | ||
5156 | bool force) | ||
5157 | { | ||
5158 | struct intel_dp *intel_dp = intel_attached_dp(connector); | ||
5159 | int status = connector->status; | ||
5160 | |||
5161 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", | ||
5162 | connector->base.id, connector->name); | ||
5163 | |||
5164 | /* If full detect is not performed yet, do a full detect */ | ||
5165 | if (!intel_dp->detect_done) { | ||
5166 | struct drm_crtc *crtc; | ||
5167 | int ret; | ||
5168 | |||
5169 | crtc = connector->state->crtc; | ||
5170 | if (crtc) { | ||
5171 | ret = drm_modeset_lock(&crtc->mutex, ctx); | ||
5172 | if (ret) | ||
5173 | return ret; | ||
5174 | } | ||
5175 | |||
5176 | status = intel_dp_long_pulse(intel_dp->attached_connector, ctx); | ||
5177 | } | ||
5178 | |||
5179 | intel_dp->detect_done = false; | ||
5180 | |||
5181 | return status; | 5197 | return status; |
5182 | } | 5198 | } |
5183 | 5199 | ||
@@ -5185,8 +5201,11 @@ static void | |||
5185 | intel_dp_force(struct drm_connector *connector) | 5201 | intel_dp_force(struct drm_connector *connector) |
5186 | { | 5202 | { |
5187 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 5203 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
5188 | struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; | 5204 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
5205 | struct intel_encoder *intel_encoder = &dig_port->base; | ||
5189 | struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); | 5206 | struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); |
5207 | enum intel_display_power_domain aux_domain = | ||
5208 | intel_aux_power_domain(dig_port); | ||
5190 | 5209 | ||
5191 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", | 5210 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", |
5192 | connector->base.id, connector->name); | 5211 | connector->base.id, connector->name); |
@@ -5195,11 +5214,11 @@ intel_dp_force(struct drm_connector *connector) | |||
5195 | if (connector->status != connector_status_connected) | 5214 | if (connector->status != connector_status_connected) |
5196 | return; | 5215 | return; |
5197 | 5216 | ||
5198 | intel_display_power_get(dev_priv, intel_dp->aux_power_domain); | 5217 | intel_display_power_get(dev_priv, aux_domain); |
5199 | 5218 | ||
5200 | intel_dp_set_edid(intel_dp); | 5219 | intel_dp_set_edid(intel_dp); |
5201 | 5220 | ||
5202 | intel_display_power_put(dev_priv, intel_dp->aux_power_domain); | 5221 | intel_display_power_put(dev_priv, aux_domain); |
5203 | } | 5222 | } |
5204 | 5223 | ||
5205 | static int intel_dp_get_modes(struct drm_connector *connector) | 5224 | static int intel_dp_get_modes(struct drm_connector *connector) |
@@ -5264,27 +5283,6 @@ intel_dp_connector_unregister(struct drm_connector *connector) | |||
5264 | intel_connector_unregister(connector); | 5283 | intel_connector_unregister(connector); |
5265 | } | 5284 | } |
5266 | 5285 | ||
5267 | static void | ||
5268 | intel_dp_connector_destroy(struct drm_connector *connector) | ||
5269 | { | ||
5270 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
5271 | |||
5272 | kfree(intel_connector->detect_edid); | ||
5273 | |||
5274 | if (!IS_ERR_OR_NULL(intel_connector->edid)) | ||
5275 | kfree(intel_connector->edid); | ||
5276 | |||
5277 | /* | ||
5278 | * Can't call intel_dp_is_edp() since the encoder may have been | ||
5279 | * destroyed already. | ||
5280 | */ | ||
5281 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) | ||
5282 | intel_panel_fini(&intel_connector->panel); | ||
5283 | |||
5284 | drm_connector_cleanup(connector); | ||
5285 | kfree(connector); | ||
5286 | } | ||
5287 | |||
5288 | void intel_dp_encoder_destroy(struct drm_encoder *encoder) | 5286 | void intel_dp_encoder_destroy(struct drm_encoder *encoder) |
5289 | { | 5287 | { |
5290 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); | 5288 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); |
@@ -5348,7 +5346,8 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, | |||
5348 | dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN, | 5346 | dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN, |
5349 | an, DRM_HDCP_AN_LEN); | 5347 | an, DRM_HDCP_AN_LEN); |
5350 | if (dpcd_ret != DRM_HDCP_AN_LEN) { | 5348 | if (dpcd_ret != DRM_HDCP_AN_LEN) { |
5351 | DRM_ERROR("Failed to write An over DP/AUX (%zd)\n", dpcd_ret); | 5349 | DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n", |
5350 | dpcd_ret); | ||
5352 | return dpcd_ret >= 0 ? -EIO : dpcd_ret; | 5351 | return dpcd_ret >= 0 ? -EIO : dpcd_ret; |
5353 | } | 5352 | } |
5354 | 5353 | ||
@@ -5364,10 +5363,10 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, | |||
5364 | rxbuf, sizeof(rxbuf), | 5363 | rxbuf, sizeof(rxbuf), |
5365 | DP_AUX_CH_CTL_AUX_AKSV_SELECT); | 5364 | DP_AUX_CH_CTL_AUX_AKSV_SELECT); |
5366 | if (ret < 0) { | 5365 | if (ret < 0) { |
5367 | DRM_ERROR("Write Aksv over DP/AUX failed (%d)\n", ret); | 5366 | DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret); |
5368 | return ret; | 5367 | return ret; |
5369 | } else if (ret == 0) { | 5368 | } else if (ret == 0) { |
5370 | DRM_ERROR("Aksv write over DP/AUX was empty\n"); | 5369 | DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n"); |
5371 | return -EIO; | 5370 | return -EIO; |
5372 | } | 5371 | } |
5373 | 5372 | ||
@@ -5382,7 +5381,7 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, | |||
5382 | ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, | 5381 | ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, |
5383 | DRM_HDCP_KSV_LEN); | 5382 | DRM_HDCP_KSV_LEN); |
5384 | if (ret != DRM_HDCP_KSV_LEN) { | 5383 | if (ret != DRM_HDCP_KSV_LEN) { |
5385 | DRM_ERROR("Read Bksv from DP/AUX failed (%zd)\n", ret); | 5384 | DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret); |
5386 | return ret >= 0 ? -EIO : ret; | 5385 | return ret >= 0 ? -EIO : ret; |
5387 | } | 5386 | } |
5388 | return 0; | 5387 | return 0; |
@@ -5400,7 +5399,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, | |||
5400 | ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO, | 5399 | ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO, |
5401 | bstatus, DRM_HDCP_BSTATUS_LEN); | 5400 | bstatus, DRM_HDCP_BSTATUS_LEN); |
5402 | if (ret != DRM_HDCP_BSTATUS_LEN) { | 5401 | if (ret != DRM_HDCP_BSTATUS_LEN) { |
5403 | DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret); | 5402 | DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); |
5404 | return ret >= 0 ? -EIO : ret; | 5403 | return ret >= 0 ? -EIO : ret; |
5405 | } | 5404 | } |
5406 | return 0; | 5405 | return 0; |
@@ -5415,7 +5414,7 @@ int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port, | |||
5415 | ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS, | 5414 | ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS, |
5416 | bcaps, 1); | 5415 | bcaps, 1); |
5417 | if (ret != 1) { | 5416 | if (ret != 1) { |
5418 | DRM_ERROR("Read bcaps from DP/AUX failed (%zd)\n", ret); | 5417 | DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret); |
5419 | return ret >= 0 ? -EIO : ret; | 5418 | return ret >= 0 ? -EIO : ret; |
5420 | } | 5419 | } |
5421 | 5420 | ||
@@ -5445,7 +5444,7 @@ int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, | |||
5445 | ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, | 5444 | ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, |
5446 | ri_prime, DRM_HDCP_RI_LEN); | 5445 | ri_prime, DRM_HDCP_RI_LEN); |
5447 | if (ret != DRM_HDCP_RI_LEN) { | 5446 | if (ret != DRM_HDCP_RI_LEN) { |
5448 | DRM_ERROR("Read Ri' from DP/AUX failed (%zd)\n", ret); | 5447 | DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret); |
5449 | return ret >= 0 ? -EIO : ret; | 5448 | return ret >= 0 ? -EIO : ret; |
5450 | } | 5449 | } |
5451 | return 0; | 5450 | return 0; |
@@ -5460,7 +5459,7 @@ int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, | |||
5460 | ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, | 5459 | ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, |
5461 | &bstatus, 1); | 5460 | &bstatus, 1); |
5462 | if (ret != 1) { | 5461 | if (ret != 1) { |
5463 | DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret); | 5462 | DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); |
5464 | return ret >= 0 ? -EIO : ret; | 5463 | return ret >= 0 ? -EIO : ret; |
5465 | } | 5464 | } |
5466 | *ksv_ready = bstatus & DP_BSTATUS_READY; | 5465 | *ksv_ready = bstatus & DP_BSTATUS_READY; |
@@ -5482,8 +5481,8 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, | |||
5482 | ksv_fifo + i * DRM_HDCP_KSV_LEN, | 5481 | ksv_fifo + i * DRM_HDCP_KSV_LEN, |
5483 | len); | 5482 | len); |
5484 | if (ret != len) { | 5483 | if (ret != len) { |
5485 | DRM_ERROR("Read ksv[%d] from DP/AUX failed (%zd)\n", i, | 5484 | DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n", |
5486 | ret); | 5485 | i, ret); |
5487 | return ret >= 0 ? -EIO : ret; | 5486 | return ret >= 0 ? -EIO : ret; |
5488 | } | 5487 | } |
5489 | } | 5488 | } |
@@ -5503,7 +5502,7 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, | |||
5503 | DP_AUX_HDCP_V_PRIME(i), part, | 5502 | DP_AUX_HDCP_V_PRIME(i), part, |
5504 | DRM_HDCP_V_PRIME_PART_LEN); | 5503 | DRM_HDCP_V_PRIME_PART_LEN); |
5505 | if (ret != DRM_HDCP_V_PRIME_PART_LEN) { | 5504 | if (ret != DRM_HDCP_V_PRIME_PART_LEN) { |
5506 | DRM_ERROR("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); | 5505 | DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret); |
5507 | return ret >= 0 ? -EIO : ret; | 5506 | return ret >= 0 ? -EIO : ret; |
5508 | } | 5507 | } |
5509 | return 0; | 5508 | return 0; |
@@ -5526,7 +5525,7 @@ bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port) | |||
5526 | ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, | 5525 | ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, |
5527 | &bstatus, 1); | 5526 | &bstatus, 1); |
5528 | if (ret != 1) { | 5527 | if (ret != 1) { |
5529 | DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret); | 5528 | DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); |
5530 | return false; | 5529 | return false; |
5531 | } | 5530 | } |
5532 | 5531 | ||
@@ -5565,6 +5564,7 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = { | |||
5565 | static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) | 5564 | static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) |
5566 | { | 5565 | { |
5567 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | 5566 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
5567 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | ||
5568 | 5568 | ||
5569 | lockdep_assert_held(&dev_priv->pps_mutex); | 5569 | lockdep_assert_held(&dev_priv->pps_mutex); |
5570 | 5570 | ||
@@ -5578,7 +5578,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) | |||
5578 | * indefinitely. | 5578 | * indefinitely. |
5579 | */ | 5579 | */ |
5580 | DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n"); | 5580 | DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n"); |
5581 | intel_display_power_get(dev_priv, intel_dp->aux_power_domain); | 5581 | intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); |
5582 | 5582 | ||
5583 | edp_panel_vdd_schedule_off(intel_dp); | 5583 | edp_panel_vdd_schedule_off(intel_dp); |
5584 | } | 5584 | } |
@@ -5631,7 +5631,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { | |||
5631 | .atomic_set_property = intel_digital_connector_atomic_set_property, | 5631 | .atomic_set_property = intel_digital_connector_atomic_set_property, |
5632 | .late_register = intel_dp_connector_register, | 5632 | .late_register = intel_dp_connector_register, |
5633 | .early_unregister = intel_dp_connector_unregister, | 5633 | .early_unregister = intel_dp_connector_unregister, |
5634 | .destroy = intel_dp_connector_destroy, | 5634 | .destroy = intel_connector_destroy, |
5635 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | 5635 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, |
5636 | .atomic_duplicate_state = intel_digital_connector_duplicate_state, | 5636 | .atomic_duplicate_state = intel_digital_connector_duplicate_state, |
5637 | }; | 5637 | }; |
@@ -5673,11 +5673,11 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) | |||
5673 | 5673 | ||
5674 | if (long_hpd) { | 5674 | if (long_hpd) { |
5675 | intel_dp->reset_link_params = true; | 5675 | intel_dp->reset_link_params = true; |
5676 | intel_dp->detect_done = false; | ||
5677 | return IRQ_NONE; | 5676 | return IRQ_NONE; |
5678 | } | 5677 | } |
5679 | 5678 | ||
5680 | intel_display_power_get(dev_priv, intel_dp->aux_power_domain); | 5679 | intel_display_power_get(dev_priv, |
5680 | intel_aux_power_domain(intel_dig_port)); | ||
5681 | 5681 | ||
5682 | if (intel_dp->is_mst) { | 5682 | if (intel_dp->is_mst) { |
5683 | if (intel_dp_check_mst_status(intel_dp) == -EINVAL) { | 5683 | if (intel_dp_check_mst_status(intel_dp) == -EINVAL) { |
@@ -5690,7 +5690,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) | |||
5690 | intel_dp->is_mst = false; | 5690 | intel_dp->is_mst = false; |
5691 | drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, | 5691 | drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, |
5692 | intel_dp->is_mst); | 5692 | intel_dp->is_mst); |
5693 | intel_dp->detect_done = false; | ||
5694 | goto put_power; | 5693 | goto put_power; |
5695 | } | 5694 | } |
5696 | } | 5695 | } |
@@ -5700,19 +5699,15 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) | |||
5700 | 5699 | ||
5701 | handled = intel_dp_short_pulse(intel_dp); | 5700 | handled = intel_dp_short_pulse(intel_dp); |
5702 | 5701 | ||
5703 | /* Short pulse can signify loss of hdcp authentication */ | 5702 | if (!handled) |
5704 | intel_hdcp_check_link(intel_dp->attached_connector); | ||
5705 | |||
5706 | if (!handled) { | ||
5707 | intel_dp->detect_done = false; | ||
5708 | goto put_power; | 5703 | goto put_power; |
5709 | } | ||
5710 | } | 5704 | } |
5711 | 5705 | ||
5712 | ret = IRQ_HANDLED; | 5706 | ret = IRQ_HANDLED; |
5713 | 5707 | ||
5714 | put_power: | 5708 | put_power: |
5715 | intel_display_power_put(dev_priv, intel_dp->aux_power_domain); | 5709 | intel_display_power_put(dev_priv, |
5710 | intel_aux_power_domain(intel_dig_port)); | ||
5716 | 5711 | ||
5717 | return ret; | 5712 | return ret; |
5718 | } | 5713 | } |
@@ -5743,6 +5738,10 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect | |||
5743 | intel_attach_force_audio_property(connector); | 5738 | intel_attach_force_audio_property(connector); |
5744 | 5739 | ||
5745 | intel_attach_broadcast_rgb_property(connector); | 5740 | intel_attach_broadcast_rgb_property(connector); |
5741 | if (HAS_GMCH_DISPLAY(dev_priv)) | ||
5742 | drm_connector_attach_max_bpc_property(connector, 6, 10); | ||
5743 | else if (INTEL_GEN(dev_priv) >= 5) | ||
5744 | drm_connector_attach_max_bpc_property(connector, 6, 12); | ||
5746 | 5745 | ||
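The property attached here shows up to userspace as a connector property named "max bpc" that clients can lower to cap the link depth. A minimal libdrm sketch of setting it; the fd, connector ID and property ID are placeholders the caller would first look up via drmModeGetConnector()/drmModeGetProperty():

    #include <stdint.h>
    #include <xf86drmMode.h>

    /* Sketch: clamp a connector's "max bpc" (IDs are placeholders). */
    static int example_set_max_bpc(int fd, uint32_t connector_id,
                                   uint32_t max_bpc_prop_id, uint64_t bpc)
    {
            return drmModeObjectSetProperty(fd, connector_id,
                                            DRM_MODE_OBJECT_CONNECTOR,
                                            max_bpc_prop_id, bpc);
    }
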
5747 | if (intel_dp_is_edp(intel_dp)) { | 5746 | if (intel_dp_is_edp(intel_dp)) { |
5748 | u32 allowed_scalers; | 5747 | u32 allowed_scalers; |
@@ -6099,10 +6098,10 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, | |||
6099 | if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { | 6098 | if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { |
6100 | switch (index) { | 6099 | switch (index) { |
6101 | case DRRS_HIGH_RR: | 6100 | case DRRS_HIGH_RR: |
6102 | intel_dp_set_m_n(intel_crtc, M1_N1); | 6101 | intel_dp_set_m_n(crtc_state, M1_N1); |
6103 | break; | 6102 | break; |
6104 | case DRRS_LOW_RR: | 6103 | case DRRS_LOW_RR: |
6105 | intel_dp_set_m_n(intel_crtc, M2_N2); | 6104 | intel_dp_set_m_n(crtc_state, M2_N2); |
6106 | break; | 6105 | break; |
6107 | case DRRS_MAX_RR: | 6106 | case DRRS_MAX_RR: |
6108 | default: | 6107 | default: |
@@ -6422,6 +6421,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, | |||
6422 | if (!intel_dp_is_edp(intel_dp)) | 6421 | if (!intel_dp_is_edp(intel_dp)) |
6423 | return true; | 6422 | return true; |
6424 | 6423 | ||
6424 | INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work); | ||
6425 | |||
6425 | /* | 6426 | /* |
6426 | * On IBX/CPT we may get here with LVDS already registered. Since the | 6427 | * On IBX/CPT we may get here with LVDS already registered. Since the |
6427 | * driver uses the only internal power sequencer available for both | 6428 | * driver uses the only internal power sequencer available for both |
@@ -6514,6 +6515,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, | |||
6514 | intel_connector->panel.backlight.power = intel_edp_backlight_power; | 6515 | intel_connector->panel.backlight.power = intel_edp_backlight_power; |
6515 | intel_panel_setup_backlight(connector, pipe); | 6516 | intel_panel_setup_backlight(connector, pipe); |
6516 | 6517 | ||
6518 | if (fixed_mode) | ||
6519 | drm_connector_init_panel_orientation_property( | ||
6520 | connector, fixed_mode->hdisplay, fixed_mode->vdisplay); | ||
6521 | |||
6517 | return true; | 6522 | return true; |
6518 | 6523 | ||
6519 | out_vdd_off: | 6524 | out_vdd_off: |
@@ -6624,9 +6629,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | |||
6624 | 6629 | ||
6625 | intel_dp_aux_init(intel_dp); | 6630 | intel_dp_aux_init(intel_dp); |
6626 | 6631 | ||
6627 | INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, | ||
6628 | edp_panel_vdd_work); | ||
6629 | |||
6630 | intel_connector_attach_encoder(intel_connector, intel_encoder); | 6632 | intel_connector_attach_encoder(intel_connector, intel_encoder); |
6631 | 6633 | ||
6632 | if (HAS_DDI(dev_priv)) | 6634 | if (HAS_DDI(dev_priv)) |
@@ -6743,6 +6745,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, | |||
6743 | if (port != PORT_A) | 6745 | if (port != PORT_A) |
6744 | intel_infoframe_init(intel_dig_port); | 6746 | intel_infoframe_init(intel_dig_port); |
6745 | 6747 | ||
6748 | intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); | ||
6746 | if (!intel_dp_init_connector(intel_dig_port, intel_connector)) | 6749 | if (!intel_dp_init_connector(intel_dig_port, intel_connector)) |
6747 | goto err_init_connector; | 6750 | goto err_init_connector; |
6748 | 6751 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 1b00f8ea145b..4de247ddf05f 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c | |||
@@ -51,6 +51,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, | |||
51 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) | 51 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) |
52 | return false; | 52 | return false; |
53 | 53 | ||
54 | pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; | ||
54 | pipe_config->has_pch_encoder = false; | 55 | pipe_config->has_pch_encoder = false; |
55 | bpp = 24; | 56 | bpp = 24; |
56 | if (intel_dp->compliance.test_data.bpc) { | 57 | if (intel_dp->compliance.test_data.bpc) { |
@@ -208,12 +209,25 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder, | |||
208 | struct intel_digital_port *intel_dig_port = intel_mst->primary; | 209 | struct intel_digital_port *intel_dig_port = intel_mst->primary; |
209 | struct intel_dp *intel_dp = &intel_dig_port->dp; | 210 | struct intel_dp *intel_dp = &intel_dig_port->dp; |
210 | 211 | ||
211 | if (intel_dp->active_mst_links == 0 && | 212 | if (intel_dp->active_mst_links == 0) |
212 | intel_dig_port->base.pre_pll_enable) | ||
213 | intel_dig_port->base.pre_pll_enable(&intel_dig_port->base, | 213 | intel_dig_port->base.pre_pll_enable(&intel_dig_port->base, |
214 | pipe_config, NULL); | 214 | pipe_config, NULL); |
215 | } | 215 | } |
216 | 216 | ||
217 | static void intel_mst_post_pll_disable_dp(struct intel_encoder *encoder, | ||
218 | const struct intel_crtc_state *old_crtc_state, | ||
219 | const struct drm_connector_state *old_conn_state) | ||
220 | { | ||
221 | struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); | ||
222 | struct intel_digital_port *intel_dig_port = intel_mst->primary; | ||
223 | struct intel_dp *intel_dp = &intel_dig_port->dp; | ||
224 | |||
225 | if (intel_dp->active_mst_links == 0) | ||
226 | intel_dig_port->base.post_pll_disable(&intel_dig_port->base, | ||
227 | old_crtc_state, | ||
228 | old_conn_state); | ||
229 | } | ||
230 | |||
217 | static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, | 231 | static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, |
218 | const struct intel_crtc_state *pipe_config, | 232 | const struct intel_crtc_state *pipe_config, |
219 | const struct drm_connector_state *conn_state) | 233 | const struct drm_connector_state *conn_state) |
@@ -335,24 +349,12 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force) | |||
335 | intel_connector->port); | 349 | intel_connector->port); |
336 | } | 350 | } |
337 | 351 | ||
338 | static void | ||
339 | intel_dp_mst_connector_destroy(struct drm_connector *connector) | ||
340 | { | ||
341 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
342 | |||
343 | if (!IS_ERR_OR_NULL(intel_connector->edid)) | ||
344 | kfree(intel_connector->edid); | ||
345 | |||
346 | drm_connector_cleanup(connector); | ||
347 | kfree(connector); | ||
348 | } | ||
349 | |||
350 | static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { | 352 | static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { |
351 | .detect = intel_dp_mst_detect, | 353 | .detect = intel_dp_mst_detect, |
352 | .fill_modes = drm_helper_probe_single_connector_modes, | 354 | .fill_modes = drm_helper_probe_single_connector_modes, |
353 | .late_register = intel_connector_register, | 355 | .late_register = intel_connector_register, |
354 | .early_unregister = intel_connector_unregister, | 356 | .early_unregister = intel_connector_unregister, |
355 | .destroy = intel_dp_mst_connector_destroy, | 357 | .destroy = intel_connector_destroy, |
356 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | 358 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, |
357 | .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, | 359 | .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, |
358 | }; | 360 | }; |
@@ -452,6 +454,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo | |||
452 | if (!intel_connector) | 454 | if (!intel_connector) |
453 | return NULL; | 455 | return NULL; |
454 | 456 | ||
457 | intel_connector->get_hw_state = intel_dp_mst_get_hw_state; | ||
458 | intel_connector->mst_port = intel_dp; | ||
459 | intel_connector->port = port; | ||
460 | |||
455 | connector = &intel_connector->base; | 461 | connector = &intel_connector->base; |
456 | ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, | 462 | ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, |
457 | DRM_MODE_CONNECTOR_DisplayPort); | 463 | DRM_MODE_CONNECTOR_DisplayPort); |
@@ -462,10 +468,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo | |||
462 | 468 | ||
463 | drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); | 469 | drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); |
464 | 470 | ||
465 | intel_connector->get_hw_state = intel_dp_mst_get_hw_state; | ||
466 | intel_connector->mst_port = intel_dp; | ||
467 | intel_connector->port = port; | ||
468 | |||
469 | for_each_pipe(dev_priv, pipe) { | 471 | for_each_pipe(dev_priv, pipe) { |
470 | struct drm_encoder *enc = | 472 | struct drm_encoder *enc = |
471 | &intel_dp->mst_encoders[pipe]->base.base; | 473 | &intel_dp->mst_encoders[pipe]->base.base; |
@@ -560,6 +562,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum | |||
560 | intel_encoder->disable = intel_mst_disable_dp; | 562 | intel_encoder->disable = intel_mst_disable_dp; |
561 | intel_encoder->post_disable = intel_mst_post_disable_dp; | 563 | intel_encoder->post_disable = intel_mst_post_disable_dp; |
562 | intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp; | 564 | intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp; |
565 | intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp; | ||
563 | intel_encoder->pre_enable = intel_mst_pre_enable_dp; | 566 | intel_encoder->pre_enable = intel_mst_pre_enable_dp; |
564 | intel_encoder->enable = intel_mst_enable_dp; | 567 | intel_encoder->enable = intel_mst_enable_dp; |
565 | intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state; | 568 | intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state; |
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c index 00b3ab656b06..3c7f10d17658 100644 --- a/drivers/gpu/drm/i915/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/intel_dpio_phy.c | |||
@@ -748,7 +748,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder, | |||
748 | val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; | 748 | val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET; |
749 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); | 749 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); |
750 | 750 | ||
751 | if (crtc->config->lane_count > 2) { | 751 | if (crtc_state->lane_count > 2) { |
752 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); | 752 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); |
753 | if (reset) | 753 | if (reset) |
754 | val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); | 754 | val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); |
@@ -765,7 +765,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder, | |||
765 | val |= DPIO_PCS_CLK_SOFT_RESET; | 765 | val |= DPIO_PCS_CLK_SOFT_RESET; |
766 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); | 766 | vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); |
767 | 767 | ||
768 | if (crtc->config->lane_count > 2) { | 768 | if (crtc_state->lane_count > 2) { |
769 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); | 769 | val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); |
770 | val |= CHV_PCS_REQ_SOFTRESET_EN; | 770 | val |= CHV_PCS_REQ_SOFTRESET_EN; |
771 | if (reset) | 771 | if (reset) |
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index e6cac9225536..901e15063b24 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c | |||
@@ -126,16 +126,16 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, | |||
126 | 126 | ||
127 | /** | 127 | /** |
128 | * intel_prepare_shared_dpll - call a dpll's prepare hook | 128 | * intel_prepare_shared_dpll - call a dpll's prepare hook |
129 | * @crtc: CRTC which has a shared dpll | 129 | * @crtc_state: CRTC, and its state, which has a shared dpll |
130 | * | 130 | * |
131 | * This calls the PLL's prepare hook if it has one and if the PLL is not | 131 | * This calls the PLL's prepare hook if it has one and if the PLL is not |
132 | * already enabled. The prepare hook is platform specific. | 132 | * already enabled. The prepare hook is platform specific. |
133 | */ | 133 | */ |
134 | void intel_prepare_shared_dpll(struct intel_crtc *crtc) | 134 | void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state) |
135 | { | 135 | { |
136 | struct drm_device *dev = crtc->base.dev; | 136 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); |
137 | struct drm_i915_private *dev_priv = to_i915(dev); | 137 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
138 | struct intel_shared_dpll *pll = crtc->config->shared_dpll; | 138 | struct intel_shared_dpll *pll = crtc_state->shared_dpll; |
139 | 139 | ||
140 | if (WARN_ON(pll == NULL)) | 140 | if (WARN_ON(pll == NULL)) |
141 | return; | 141 | return; |
@@ -154,15 +154,15 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc) | |||
154 | 154 | ||
155 | /** | 155 | /** |
156 | * intel_enable_shared_dpll - enable a CRTC's shared DPLL | 156 | * intel_enable_shared_dpll - enable a CRTC's shared DPLL |
157 | * @crtc: CRTC which has a shared DPLL | 157 | * @crtc_state: CRTC, and its state, which has a shared DPLL |
158 | * | 158 | * |
159 | * Enable the shared DPLL used by @crtc. | 159 | * Enable the shared DPLL used by @crtc. |
160 | */ | 160 | */ |
161 | void intel_enable_shared_dpll(struct intel_crtc *crtc) | 161 | void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state) |
162 | { | 162 | { |
163 | struct drm_device *dev = crtc->base.dev; | 163 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); |
164 | struct drm_i915_private *dev_priv = to_i915(dev); | 164 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
165 | struct intel_shared_dpll *pll = crtc->config->shared_dpll; | 165 | struct intel_shared_dpll *pll = crtc_state->shared_dpll; |
166 | unsigned int crtc_mask = drm_crtc_mask(&crtc->base); | 166 | unsigned int crtc_mask = drm_crtc_mask(&crtc->base); |
167 | unsigned int old_mask; | 167 | unsigned int old_mask; |
168 | 168 | ||
@@ -199,14 +199,15 @@ out: | |||
199 | 199 | ||
200 | /** | 200 | /** |
201 | * intel_disable_shared_dpll - disable a CRTC's shared DPLL | 201 | * intel_disable_shared_dpll - disable a CRTC's shared DPLL |
202 | * @crtc: CRTC which has a shared DPLL | 202 | * @crtc_state: CRTC, and its state, which has a shared DPLL |
203 | * | 203 | * |
204 | * Disable the shared DPLL used by @crtc. | 204 | * Disable the shared DPLL used by @crtc. |
205 | */ | 205 | */ |
206 | void intel_disable_shared_dpll(struct intel_crtc *crtc) | 206 | void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state) |
207 | { | 207 | { |
208 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); | ||
208 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); | 209 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
209 | struct intel_shared_dpll *pll = crtc->config->shared_dpll; | 210 | struct intel_shared_dpll *pll = crtc_state->shared_dpll; |
210 | unsigned int crtc_mask = drm_crtc_mask(&crtc->base); | 211 | unsigned int crtc_mask = drm_crtc_mask(&crtc->base); |
211 | 212 | ||
212 | /* PCH only available on ILK+ */ | 213 | /* PCH only available on ILK+ */ |
@@ -409,14 +410,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, | |||
409 | struct intel_shared_dpll *pll) | 410 | struct intel_shared_dpll *pll) |
410 | { | 411 | { |
411 | const enum intel_dpll_id id = pll->info->id; | 412 | const enum intel_dpll_id id = pll->info->id; |
412 | struct drm_device *dev = &dev_priv->drm; | ||
413 | struct intel_crtc *crtc; | ||
414 | |||
415 | /* Make sure no transcoder is still depending on us. */ | ||
416 | for_each_intel_crtc(dev, crtc) { | ||
417 | if (crtc->config->shared_dpll == pll) | ||
418 | assert_pch_transcoder_disabled(dev_priv, crtc->pipe); | ||
419 | } | ||
420 | 413 | ||
421 | I915_WRITE(PCH_DPLL(id), 0); | 414 | I915_WRITE(PCH_DPLL(id), 0); |
422 | POSTING_READ(PCH_DPLL(id)); | 415 | POSTING_READ(PCH_DPLL(id)); |
@@ -2628,11 +2621,16 @@ static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id) | |||
2628 | return id - DPLL_ID_ICL_MGPLL1 + PORT_C; | 2621 | return id - DPLL_ID_ICL_MGPLL1 + PORT_C; |
2629 | } | 2622 | } |
2630 | 2623 | ||
2631 | static enum intel_dpll_id icl_port_to_mg_pll_id(enum port port) | 2624 | enum intel_dpll_id icl_port_to_mg_pll_id(enum port port) |
2632 | { | 2625 | { |
2633 | return port - PORT_C + DPLL_ID_ICL_MGPLL1; | 2626 | return port - PORT_C + DPLL_ID_ICL_MGPLL1; |
2634 | } | 2627 | } |
2635 | 2628 | ||
2629 | bool intel_dpll_is_combophy(enum intel_dpll_id id) | ||
2630 | { | ||
2631 | return id == DPLL_ID_ICL_DPLL0 || id == DPLL_ID_ICL_DPLL1; | ||
2632 | } | ||
2633 | |||
2636 | static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, | 2634 | static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, |
2637 | uint32_t *target_dco_khz, | 2635 | uint32_t *target_dco_khz, |
2638 | struct intel_dpll_hw_state *state) | 2636 | struct intel_dpll_hw_state *state) |
@@ -2874,8 +2872,8 @@ static struct intel_shared_dpll * | |||
2874 | icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, | 2872 | icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, |
2875 | struct intel_encoder *encoder) | 2873 | struct intel_encoder *encoder) |
2876 | { | 2874 | { |
2877 | struct intel_digital_port *intel_dig_port = | 2875 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
2878 | enc_to_dig_port(&encoder->base); | 2876 | struct intel_digital_port *intel_dig_port; |
2879 | struct intel_shared_dpll *pll; | 2877 | struct intel_shared_dpll *pll; |
2880 | struct intel_dpll_hw_state pll_state = {}; | 2878 | struct intel_dpll_hw_state pll_state = {}; |
2881 | enum port port = encoder->port; | 2879 | enum port port = encoder->port; |
@@ -2883,18 +2881,21 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, | |||
2883 | int clock = crtc_state->port_clock; | 2881 | int clock = crtc_state->port_clock; |
2884 | bool ret; | 2882 | bool ret; |
2885 | 2883 | ||
2886 | switch (port) { | 2884 | if (intel_port_is_combophy(dev_priv, port)) { |
2887 | case PORT_A: | ||
2888 | case PORT_B: | ||
2889 | min = DPLL_ID_ICL_DPLL0; | 2885 | min = DPLL_ID_ICL_DPLL0; |
2890 | max = DPLL_ID_ICL_DPLL1; | 2886 | max = DPLL_ID_ICL_DPLL1; |
2891 | ret = icl_calc_dpll_state(crtc_state, encoder, clock, | 2887 | ret = icl_calc_dpll_state(crtc_state, encoder, clock, |
2892 | &pll_state); | 2888 | &pll_state); |
2893 | break; | 2889 | } else if (intel_port_is_tc(dev_priv, port)) { |
2894 | case PORT_C: | 2890 | if (encoder->type == INTEL_OUTPUT_DP_MST) { |
2895 | case PORT_D: | 2891 | struct intel_dp_mst_encoder *mst_encoder; |
2896 | case PORT_E: | 2892 | |
2897 | case PORT_F: | 2893 | mst_encoder = enc_to_mst(&encoder->base); |
2894 | intel_dig_port = mst_encoder->primary; | ||
2895 | } else { | ||
2896 | intel_dig_port = enc_to_dig_port(&encoder->base); | ||
2897 | } | ||
2898 | |||
2898 | if (intel_dig_port->tc_type == TC_PORT_TBT) { | 2899 | if (intel_dig_port->tc_type == TC_PORT_TBT) { |
2899 | min = DPLL_ID_ICL_TBTPLL; | 2900 | min = DPLL_ID_ICL_TBTPLL; |
2900 | max = min; | 2901 | max = min; |
@@ -2906,8 +2907,7 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, | |||
2906 | ret = icl_calc_mg_pll_state(crtc_state, encoder, clock, | 2907 | ret = icl_calc_mg_pll_state(crtc_state, encoder, clock, |
2907 | &pll_state); | 2908 | &pll_state); |
2908 | } | 2909 | } |
2909 | break; | 2910 | } else { |
2910 | default: | ||
2911 | MISSING_CASE(port); | 2911 | MISSING_CASE(port); |
2912 | return NULL; | 2912 | return NULL; |
2913 | } | 2913 | } |
@@ -2932,21 +2932,16 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, | |||
2932 | 2932 | ||
2933 | static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id) | 2933 | static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id) |
2934 | { | 2934 | { |
2935 | switch (id) { | 2935 | if (intel_dpll_is_combophy(id)) |
2936 | default: | ||
2937 | MISSING_CASE(id); | ||
2938 | /* fall through */ | ||
2939 | case DPLL_ID_ICL_DPLL0: | ||
2940 | case DPLL_ID_ICL_DPLL1: | ||
2941 | return CNL_DPLL_ENABLE(id); | 2936 | return CNL_DPLL_ENABLE(id); |
2942 | case DPLL_ID_ICL_TBTPLL: | 2937 | else if (id == DPLL_ID_ICL_TBTPLL) |
2943 | return TBT_PLL_ENABLE; | 2938 | return TBT_PLL_ENABLE; |
2944 | case DPLL_ID_ICL_MGPLL1: | 2939 | else |
2945 | case DPLL_ID_ICL_MGPLL2: | 2940 | /* |
2946 | case DPLL_ID_ICL_MGPLL3: | 2941 | * TODO: Make MG_PLL macros use |
2947 | case DPLL_ID_ICL_MGPLL4: | 2942 | * tc port id instead of port id |
2943 | */ | ||
2948 | return MG_PLL_ENABLE(icl_mg_pll_id_to_port(id)); | 2944 | return MG_PLL_ENABLE(icl_mg_pll_id_to_port(id)); |
2949 | } | ||
2950 | } | 2945 | } |
2951 | 2946 | ||
2952 | static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, | 2947 | static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, |
@@ -2965,17 +2960,11 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, | |||
2965 | if (!(val & PLL_ENABLE)) | 2960 | if (!(val & PLL_ENABLE)) |
2966 | goto out; | 2961 | goto out; |
2967 | 2962 | ||
2968 | switch (id) { | 2963 | if (intel_dpll_is_combophy(id) || |
2969 | case DPLL_ID_ICL_DPLL0: | 2964 | id == DPLL_ID_ICL_TBTPLL) { |
2970 | case DPLL_ID_ICL_DPLL1: | ||
2971 | case DPLL_ID_ICL_TBTPLL: | ||
2972 | hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id)); | 2965 | hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id)); |
2973 | hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id)); | 2966 | hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id)); |
2974 | break; | 2967 | } else { |
2975 | case DPLL_ID_ICL_MGPLL1: | ||
2976 | case DPLL_ID_ICL_MGPLL2: | ||
2977 | case DPLL_ID_ICL_MGPLL3: | ||
2978 | case DPLL_ID_ICL_MGPLL4: | ||
2979 | port = icl_mg_pll_id_to_port(id); | 2968 | port = icl_mg_pll_id_to_port(id); |
2980 | hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port)); | 2969 | hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port)); |
2981 | hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; | 2970 | hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; |
@@ -3013,9 +3002,6 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, | |||
3013 | 3002 | ||
3014 | hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask; | 3003 | hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask; |
3015 | hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask; | 3004 | hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask; |
3016 | break; | ||
3017 | default: | ||
3018 | MISSING_CASE(id); | ||
3019 | } | 3005 | } |
3020 | 3006 | ||
3021 | ret = true; | 3007 | ret = true; |
@@ -3104,21 +3090,10 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv, | |||
3104 | PLL_POWER_STATE, 1)) | 3090 | PLL_POWER_STATE, 1)) |
3105 | DRM_ERROR("PLL %d Power not enabled\n", id); | 3091 | DRM_ERROR("PLL %d Power not enabled\n", id); |
3106 | 3092 | ||
3107 | switch (id) { | 3093 | if (intel_dpll_is_combophy(id) || id == DPLL_ID_ICL_TBTPLL) |
3108 | case DPLL_ID_ICL_DPLL0: | ||
3109 | case DPLL_ID_ICL_DPLL1: | ||
3110 | case DPLL_ID_ICL_TBTPLL: | ||
3111 | icl_dpll_write(dev_priv, pll); | 3094 | icl_dpll_write(dev_priv, pll); |
3112 | break; | 3095 | else |
3113 | case DPLL_ID_ICL_MGPLL1: | ||
3114 | case DPLL_ID_ICL_MGPLL2: | ||
3115 | case DPLL_ID_ICL_MGPLL3: | ||
3116 | case DPLL_ID_ICL_MGPLL4: | ||
3117 | icl_mg_pll_write(dev_priv, pll); | 3096 | icl_mg_pll_write(dev_priv, pll); |
3118 | break; | ||
3119 | default: | ||
3120 | MISSING_CASE(id); | ||
3121 | } | ||
3122 | 3097 | ||
3123 | /* | 3098 | /* |
3124 | * DVFS pre sequence would be here, but in our driver the cdclk code | 3099 | * DVFS pre sequence would be here, but in our driver the cdclk code |
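The three hunks above replace parallel switch statements over the PLL id with simple range tests: combo PHY PLLs, the TBT PLL, and everything else (the MG PLLs). A minimal standalone sketch of that classification follows — the enum values are laid out as in the diff, but the register names are just illustrative strings here, not the real i915 macros:

#include <stdio.h>
#include <stdbool.h>

enum intel_dpll_id {
	DPLL_ID_ICL_DPLL0,
	DPLL_ID_ICL_DPLL1,
	DPLL_ID_ICL_TBTPLL,
	DPLL_ID_ICL_MGPLL1,
	DPLL_ID_ICL_MGPLL2,
	DPLL_ID_ICL_MGPLL3,
	DPLL_ID_ICL_MGPLL4,
};

static bool intel_dpll_is_combophy(enum intel_dpll_id id)
{
	return id == DPLL_ID_ICL_DPLL0 || id == DPLL_ID_ICL_DPLL1;
}

static const char *pll_enable_reg_name(enum intel_dpll_id id)
{
	if (intel_dpll_is_combophy(id))
		return "CNL_DPLL_ENABLE(id)";
	else if (id == DPLL_ID_ICL_TBTPLL)
		return "TBT_PLL_ENABLE";
	else
		return "MG_PLL_ENABLE(port)";	/* all remaining ids are MG PLLs */
}

int main(void)
{
	for (int id = DPLL_ID_ICL_DPLL0; id <= DPLL_ID_ICL_MGPLL4; id++)
		printf("pll %d -> %s\n", id,
		       pll_enable_reg_name((enum intel_dpll_id)id));
	return 0;
}

Collapsing the three switches into one predicate means a new PLL id only has to be classified once, instead of being added to every switch.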
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h index bf0de8a4dc63..a033d8f06d4a 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h | |||
@@ -334,9 +334,9 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, | |||
334 | void intel_release_shared_dpll(struct intel_shared_dpll *dpll, | 334 | void intel_release_shared_dpll(struct intel_shared_dpll *dpll, |
335 | struct intel_crtc *crtc, | 335 | struct intel_crtc *crtc, |
336 | struct drm_atomic_state *state); | 336 | struct drm_atomic_state *state); |
337 | void intel_prepare_shared_dpll(struct intel_crtc *crtc); | 337 | void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state); |
338 | void intel_enable_shared_dpll(struct intel_crtc *crtc); | 338 | void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state); |
339 | void intel_disable_shared_dpll(struct intel_crtc *crtc); | 339 | void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state); |
340 | void intel_shared_dpll_swap_state(struct drm_atomic_state *state); | 340 | void intel_shared_dpll_swap_state(struct drm_atomic_state *state); |
341 | void intel_shared_dpll_init(struct drm_device *dev); | 341 | void intel_shared_dpll_init(struct drm_device *dev); |
342 | 342 | ||
@@ -345,5 +345,7 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, | |||
345 | int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv, | 345 | int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv, |
346 | uint32_t pll_id); | 346 | uint32_t pll_id); |
347 | int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv); | 347 | int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv); |
348 | enum intel_dpll_id icl_port_to_mg_pll_id(enum port port); | ||
349 | bool intel_dpll_is_combophy(enum intel_dpll_id id); | ||
348 | 350 | ||
349 | #endif /* _INTEL_DPLL_MGR_H_ */ | 351 | #endif /* _INTEL_DPLL_MGR_H_ */ |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f8dc84b2d2d3..a7d9ac912125 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -381,6 +381,15 @@ struct intel_hdcp_shim { | |||
381 | bool *hdcp_capable); | 381 | bool *hdcp_capable); |
382 | }; | 382 | }; |
383 | 383 | ||
384 | struct intel_hdcp { | ||
385 | const struct intel_hdcp_shim *shim; | ||
386 | /* Mutex for hdcp state of the connector */ | ||
387 | struct mutex mutex; | ||
388 | u64 value; | ||
389 | struct delayed_work check_work; | ||
390 | struct work_struct prop_work; | ||
391 | }; | ||
392 | |||
384 | struct intel_connector { | 393 | struct intel_connector { |
385 | struct drm_connector base; | 394 | struct drm_connector base; |
386 | /* | 395 | /* |
@@ -413,11 +422,7 @@ struct intel_connector { | |||
413 | /* Work struct to schedule a uevent on link train failure */ | 422 | /* Work struct to schedule a uevent on link train failure */ |
414 | struct work_struct modeset_retry_work; | 423 | struct work_struct modeset_retry_work; |
415 | 424 | ||
416 | const struct intel_hdcp_shim *hdcp_shim; | 425 | struct intel_hdcp hdcp; |
417 | struct mutex hdcp_mutex; | ||
418 | uint64_t hdcp_value; /* protected by hdcp_mutex */ | ||
419 | struct delayed_work hdcp_check_work; | ||
420 | struct work_struct hdcp_prop_work; | ||
421 | }; | 426 | }; |
422 | 427 | ||
423 | struct intel_digital_connector_state { | 428 | struct intel_digital_connector_state { |
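The hunk above folds the connector's loose hdcp_* fields into a single struct intel_hdcp, so the mutex and the state it protects travel together. A generic userspace sketch of that pattern, with illustrative field names rather than the i915 ones:

#include <pthread.h>
#include <stdint.h>

/* Before: parallel foo_lock / foo_value / foo_work fields on the parent.
 * After: one substruct keeps the mutex next to the data it guards. */
struct hdcp_state {
	pthread_mutex_t mutex;	/* protects value below */
	uint64_t value;
};

struct connector {
	const char *name;
	struct hdcp_state hdcp;
};

static uint64_t connector_get_hdcp_value(struct connector *c)
{
	uint64_t v;

	pthread_mutex_lock(&c->hdcp.mutex);
	v = c->hdcp.value;
	pthread_mutex_unlock(&c->hdcp.mutex);
	return v;
}

int main(void)
{
	struct connector c = { .name = "DP-1" };

	pthread_mutex_init(&c.hdcp.mutex, NULL);
	c.hdcp.value = 1;
	return (int)connector_get_hdcp_value(&c) - 1;	/* exits 0 */
}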
@@ -539,6 +544,26 @@ struct intel_plane_state { | |||
539 | */ | 544 | */ |
540 | int scaler_id; | 545 | int scaler_id; |
541 | 546 | ||
547 | /* | ||
548 | * linked_plane: | ||
549 | * | ||
550 | * ICL planar formats require 2 planes that are updated as pairs. | ||
551 | * This member is used to make sure the other plane is also updated | ||
552 | * when required, and for update_slave() to find the correct | ||
553 | * plane_state to pass as argument. | ||
554 | */ | ||
555 | struct intel_plane *linked_plane; | ||
556 | |||
557 | /* | ||
558 | * slave: | ||
559 | * If set, this plane is not updated directly: the linked plane's | ||
560 | * state updates it during atomic commit, via the update_slave() callback. | ||
561 | * | ||
562 | * It's also used by the watermark code to ignore wm calculations on | ||
563 | * this plane. They're calculated by the linked plane's wm code. | ||
564 | */ | ||
565 | u32 slave; | ||
566 | |||
542 | struct drm_intel_sprite_colorkey ckey; | 567 | struct drm_intel_sprite_colorkey ckey; |
543 | }; | 568 | }; |
544 | 569 | ||
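The linked_plane/slave fields above drive ICL's paired-plane (planar NV12) updates. A hypothetical standalone sketch of the commit walk they enable — slave planes are skipped in the main loop and programmed from their master's plane state via an update_slave() hook; none of these helpers are the driver's actual functions:

#include <stdio.h>
#include <stddef.h>

struct plane;

struct plane_state {
	struct plane *linked_plane;	/* other half of the pair, or NULL */
	int slave;			/* nonzero: programmed by its master */
};

struct plane {
	const char *name;
	struct plane_state *state;
};

static void update_plane(struct plane *p, const struct plane_state *ps)
{
	printf("%s: update_plane\n", p->name);
}

static void update_slave(struct plane *p, const struct plane_state *master_state)
{
	/* the slave is programmed using the master's plane state */
	printf("%s: update_slave (master state %p)\n", p->name,
	       (const void *)master_state);
}

static void commit_planes(struct plane **planes, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		struct plane *p = planes[i];

		if (p->state->slave)
			continue;	/* handled via its master below */

		update_plane(p, p->state);

		if (p->state->linked_plane)
			update_slave(p->state->linked_plane, p->state);
	}
}

int main(void)
{
	struct plane_state slave_state = { .slave = 1 };
	struct plane uv = { "UV plane", &slave_state };
	struct plane_state master_state = { .linked_plane = &uv };
	struct plane master = { "Y plane", &master_state };
	struct plane *planes[] = { &master, &uv };

	commit_planes(planes, 2);
	return 0;
}

This also matches the watermark note in the diff: since the slave's state is never programmed on its own, its wm calculations are owned by the linked plane as well.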
@@ -547,6 +572,7 @@ struct intel_initial_plane_config { | |||
547 | unsigned int tiling; | 572 | unsigned int tiling; |
548 | int size; | 573 | int size; |
549 | u32 base; | 574 | u32 base; |
575 | u8 rotation; | ||
550 | }; | 576 | }; |
551 | 577 | ||
552 | #define SKL_MIN_SRC_W 8 | 578 | #define SKL_MIN_SRC_W 8 |
@@ -712,6 +738,13 @@ struct intel_crtc_wm_state { | |||
712 | bool need_postvbl_update; | 738 | bool need_postvbl_update; |
713 | }; | 739 | }; |
714 | 740 | ||
741 | enum intel_output_format { | ||
742 | INTEL_OUTPUT_FORMAT_INVALID, | ||
743 | INTEL_OUTPUT_FORMAT_RGB, | ||
744 | INTEL_OUTPUT_FORMAT_YCBCR420, | ||
745 | INTEL_OUTPUT_FORMAT_YCBCR444, | ||
746 | }; | ||
747 | |||
715 | struct intel_crtc_state { | 748 | struct intel_crtc_state { |
716 | struct drm_crtc_state base; | 749 | struct drm_crtc_state base; |
717 | 750 | ||
@@ -899,8 +932,11 @@ struct intel_crtc_state { | |||
899 | /* HDMI High TMDS char rate ratio */ | 932 | /* HDMI High TMDS char rate ratio */ |
900 | bool hdmi_high_tmds_clock_ratio; | 933 | bool hdmi_high_tmds_clock_ratio; |
901 | 934 | ||
902 | /* output format is YCBCR 4:2:0 */ | 935 | /* Output format RGB/YCBCR etc */ |
903 | bool ycbcr420; | 936 | enum intel_output_format output_format; |
937 | |||
938 | /* Output down scaling is done in LSPCON device */ | ||
939 | bool lspcon_downsampling; | ||
904 | }; | 940 | }; |
905 | 941 | ||
906 | struct intel_crtc { | 942 | struct intel_crtc { |
@@ -973,6 +1009,9 @@ struct intel_plane { | |||
973 | void (*update_plane)(struct intel_plane *plane, | 1009 | void (*update_plane)(struct intel_plane *plane, |
974 | const struct intel_crtc_state *crtc_state, | 1010 | const struct intel_crtc_state *crtc_state, |
975 | const struct intel_plane_state *plane_state); | 1011 | const struct intel_plane_state *plane_state); |
1012 | void (*update_slave)(struct intel_plane *plane, | ||
1013 | const struct intel_crtc_state *crtc_state, | ||
1014 | const struct intel_plane_state *plane_state); | ||
976 | void (*disable_plane)(struct intel_plane *plane, | 1015 | void (*disable_plane)(struct intel_plane *plane, |
977 | struct intel_crtc *crtc); | 1016 | struct intel_crtc *crtc); |
978 | bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); | 1017 | bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); |
@@ -1070,13 +1109,13 @@ struct intel_dp { | |||
1070 | bool link_mst; | 1109 | bool link_mst; |
1071 | bool link_trained; | 1110 | bool link_trained; |
1072 | bool has_audio; | 1111 | bool has_audio; |
1073 | bool detect_done; | ||
1074 | bool reset_link_params; | 1112 | bool reset_link_params; |
1075 | enum aux_ch aux_ch; | ||
1076 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; | 1113 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; |
1077 | uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; | 1114 | uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; |
1078 | uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; | 1115 | uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; |
1079 | uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; | 1116 | uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; |
1117 | u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]; | ||
1118 | u8 fec_capable; | ||
1080 | /* source rates */ | 1119 | /* source rates */ |
1081 | int num_source_rates; | 1120 | int num_source_rates; |
1082 | const int *source_rates; | 1121 | const int *source_rates; |
@@ -1094,7 +1133,6 @@ struct intel_dp { | |||
1094 | /* sink or branch descriptor */ | 1133 | /* sink or branch descriptor */ |
1095 | struct drm_dp_desc desc; | 1134 | struct drm_dp_desc desc; |
1096 | struct drm_dp_aux aux; | 1135 | struct drm_dp_aux aux; |
1097 | enum intel_display_power_domain aux_power_domain; | ||
1098 | uint8_t train_set[4]; | 1136 | uint8_t train_set[4]; |
1099 | int panel_power_up_delay; | 1137 | int panel_power_up_delay; |
1100 | int panel_power_down_delay; | 1138 | int panel_power_down_delay; |
@@ -1156,9 +1194,15 @@ struct intel_dp { | |||
1156 | struct intel_dp_compliance compliance; | 1194 | struct intel_dp_compliance compliance; |
1157 | }; | 1195 | }; |
1158 | 1196 | ||
1197 | enum lspcon_vendor { | ||
1198 | LSPCON_VENDOR_MCA, | ||
1199 | LSPCON_VENDOR_PARADE | ||
1200 | }; | ||
1201 | |||
1159 | struct intel_lspcon { | 1202 | struct intel_lspcon { |
1160 | bool active; | 1203 | bool active; |
1161 | enum drm_lspcon_mode mode; | 1204 | enum drm_lspcon_mode mode; |
1205 | enum lspcon_vendor vendor; | ||
1162 | }; | 1206 | }; |
1163 | 1207 | ||
1164 | struct intel_digital_port { | 1208 | struct intel_digital_port { |
@@ -1170,18 +1214,20 @@ struct intel_digital_port { | |||
1170 | enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool); | 1214 | enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool); |
1171 | bool release_cl2_override; | 1215 | bool release_cl2_override; |
1172 | uint8_t max_lanes; | 1216 | uint8_t max_lanes; |
1217 | /* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */ | ||
1218 | enum aux_ch aux_ch; | ||
1173 | enum intel_display_power_domain ddi_io_power_domain; | 1219 | enum intel_display_power_domain ddi_io_power_domain; |
1174 | enum tc_port_type tc_type; | 1220 | enum tc_port_type tc_type; |
1175 | 1221 | ||
1176 | void (*write_infoframe)(struct drm_encoder *encoder, | 1222 | void (*write_infoframe)(struct intel_encoder *encoder, |
1177 | const struct intel_crtc_state *crtc_state, | 1223 | const struct intel_crtc_state *crtc_state, |
1178 | unsigned int type, | 1224 | unsigned int type, |
1179 | const void *frame, ssize_t len); | 1225 | const void *frame, ssize_t len); |
1180 | void (*set_infoframes)(struct drm_encoder *encoder, | 1226 | void (*set_infoframes)(struct intel_encoder *encoder, |
1181 | bool enable, | 1227 | bool enable, |
1182 | const struct intel_crtc_state *crtc_state, | 1228 | const struct intel_crtc_state *crtc_state, |
1183 | const struct drm_connector_state *conn_state); | 1229 | const struct drm_connector_state *conn_state); |
1184 | bool (*infoframe_enabled)(struct drm_encoder *encoder, | 1230 | bool (*infoframe_enabled)(struct intel_encoder *encoder, |
1185 | const struct intel_crtc_state *pipe_config); | 1231 | const struct intel_crtc_state *pipe_config); |
1186 | }; | 1232 | }; |
1187 | 1233 | ||
@@ -1281,6 +1327,12 @@ enc_to_dig_port(struct drm_encoder *encoder) | |||
1281 | return NULL; | 1327 | return NULL; |
1282 | } | 1328 | } |
1283 | 1329 | ||
1330 | static inline struct intel_digital_port * | ||
1331 | conn_to_dig_port(struct intel_connector *connector) | ||
1332 | { | ||
1333 | return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base); | ||
1334 | } | ||
1335 | |||
1284 | static inline struct intel_dp_mst_encoder * | 1336 | static inline struct intel_dp_mst_encoder * |
1285 | enc_to_mst(struct drm_encoder *encoder) | 1337 | enc_to_mst(struct drm_encoder *encoder) |
1286 | { | 1338 | { |
@@ -1306,6 +1358,12 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder) | |||
1306 | } | 1358 | } |
1307 | } | 1359 | } |
1308 | 1360 | ||
1361 | static inline struct intel_lspcon * | ||
1362 | enc_to_intel_lspcon(struct drm_encoder *encoder) | ||
1363 | { | ||
1364 | return &enc_to_dig_port(encoder)->lspcon; | ||
1365 | } | ||
1366 | |||
1309 | static inline struct intel_digital_port * | 1367 | static inline struct intel_digital_port * |
1310 | dp_to_dig_port(struct intel_dp *intel_dp) | 1368 | dp_to_dig_port(struct intel_dp *intel_dp) |
1311 | { | 1369 | { |
@@ -1331,6 +1389,27 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi) | |||
1331 | } | 1389 | } |
1332 | 1390 | ||
1333 | static inline struct intel_plane_state * | 1391 | static inline struct intel_plane_state * |
1392 | intel_atomic_get_plane_state(struct intel_atomic_state *state, | ||
1393 | struct intel_plane *plane) | ||
1394 | { | ||
1395 | struct drm_plane_state *ret = | ||
1396 | drm_atomic_get_plane_state(&state->base, &plane->base); | ||
1397 | |||
1398 | if (IS_ERR(ret)) | ||
1399 | return ERR_CAST(ret); | ||
1400 | |||
1401 | return to_intel_plane_state(ret); | ||
1402 | } | ||
1403 | |||
1404 | static inline struct intel_plane_state * | ||
1405 | intel_atomic_get_old_plane_state(struct intel_atomic_state *state, | ||
1406 | struct intel_plane *plane) | ||
1407 | { | ||
1408 | return to_intel_plane_state(drm_atomic_get_old_plane_state(&state->base, | ||
1409 | &plane->base)); | ||
1410 | } | ||
1411 | |||
1412 | static inline struct intel_plane_state * | ||
1334 | intel_atomic_get_new_plane_state(struct intel_atomic_state *state, | 1413 | intel_atomic_get_new_plane_state(struct intel_atomic_state *state, |
1335 | struct intel_plane *plane) | 1414 | struct intel_plane *plane) |
1336 | { | 1415 | { |
@@ -1444,6 +1523,7 @@ void icl_map_plls_to_ports(struct drm_crtc *crtc, | |||
1444 | void icl_unmap_plls_to_ports(struct drm_crtc *crtc, | 1523 | void icl_unmap_plls_to_ports(struct drm_crtc *crtc, |
1445 | struct intel_crtc_state *crtc_state, | 1524 | struct intel_crtc_state *crtc_state, |
1446 | struct drm_atomic_state *old_state); | 1525 | struct drm_atomic_state *old_state); |
1526 | void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder); | ||
1447 | 1527 | ||
1448 | unsigned int intel_fb_align_height(const struct drm_framebuffer *fb, | 1528 | unsigned int intel_fb_align_height(const struct drm_framebuffer *fb, |
1449 | int color_plane, unsigned int height); | 1529 | int color_plane, unsigned int height); |
@@ -1488,7 +1568,6 @@ void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state, | |||
1488 | void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); | 1568 | void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); |
1489 | void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); | 1569 | void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); |
1490 | enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc); | 1570 | enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc); |
1491 | void intel_update_rawclk(struct drm_i915_private *dev_priv); | ||
1492 | int vlv_get_hpll_vco(struct drm_i915_private *dev_priv); | 1571 | int vlv_get_hpll_vco(struct drm_i915_private *dev_priv); |
1493 | int vlv_get_cck_clock(struct drm_i915_private *dev_priv, | 1572 | int vlv_get_cck_clock(struct drm_i915_private *dev_priv, |
1494 | const char *name, u32 reg, int ref_freq); | 1573 | const char *name, u32 reg, int ref_freq); |
@@ -1509,20 +1588,12 @@ void intel_mark_idle(struct drm_i915_private *dev_priv); | |||
1509 | int intel_display_suspend(struct drm_device *dev); | 1588 | int intel_display_suspend(struct drm_device *dev); |
1510 | void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv); | 1589 | void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv); |
1511 | void intel_encoder_destroy(struct drm_encoder *encoder); | 1590 | void intel_encoder_destroy(struct drm_encoder *encoder); |
1512 | int intel_connector_init(struct intel_connector *); | ||
1513 | struct intel_connector *intel_connector_alloc(void); | ||
1514 | void intel_connector_free(struct intel_connector *connector); | ||
1515 | bool intel_connector_get_hw_state(struct intel_connector *connector); | ||
1516 | void intel_connector_attach_encoder(struct intel_connector *connector, | ||
1517 | struct intel_encoder *encoder); | ||
1518 | struct drm_display_mode * | 1591 | struct drm_display_mode * |
1519 | intel_encoder_current_mode(struct intel_encoder *encoder); | 1592 | intel_encoder_current_mode(struct intel_encoder *encoder); |
1520 | bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port); | 1593 | bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port); |
1521 | bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port); | 1594 | bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port); |
1522 | enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, | 1595 | enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, |
1523 | enum port port); | 1596 | enum port port); |
1524 | |||
1525 | enum pipe intel_get_pipe_from_connector(struct intel_connector *connector); | ||
1526 | int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, | 1597 | int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, |
1527 | struct drm_file *file_priv); | 1598 | struct drm_file *file_priv); |
1528 | enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, | 1599 | enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, |
@@ -1628,9 +1699,11 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv); | |||
1628 | void bxt_disable_dc9(struct drm_i915_private *dev_priv); | 1699 | void bxt_disable_dc9(struct drm_i915_private *dev_priv); |
1629 | void gen9_enable_dc5(struct drm_i915_private *dev_priv); | 1700 | void gen9_enable_dc5(struct drm_i915_private *dev_priv); |
1630 | unsigned int skl_cdclk_get_vco(unsigned int freq); | 1701 | unsigned int skl_cdclk_get_vco(unsigned int freq); |
1702 | void skl_enable_dc6(struct drm_i915_private *dev_priv); | ||
1631 | void intel_dp_get_m_n(struct intel_crtc *crtc, | 1703 | void intel_dp_get_m_n(struct intel_crtc *crtc, |
1632 | struct intel_crtc_state *pipe_config); | 1704 | struct intel_crtc_state *pipe_config); |
1633 | void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n); | 1705 | void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, |
1706 | enum link_m_n_set m_n); | ||
1634 | int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); | 1707 | int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); |
1635 | bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, | 1708 | bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, |
1636 | struct dpll *best_clock); | 1709 | struct dpll *best_clock); |
@@ -1641,12 +1714,14 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state); | |||
1641 | void hsw_enable_ips(const struct intel_crtc_state *crtc_state); | 1714 | void hsw_enable_ips(const struct intel_crtc_state *crtc_state); |
1642 | void hsw_disable_ips(const struct intel_crtc_state *crtc_state); | 1715 | void hsw_disable_ips(const struct intel_crtc_state *crtc_state); |
1643 | enum intel_display_power_domain intel_port_to_power_domain(enum port port); | 1716 | enum intel_display_power_domain intel_port_to_power_domain(enum port port); |
1717 | enum intel_display_power_domain | ||
1718 | intel_aux_power_domain(struct intel_digital_port *dig_port); | ||
1644 | void intel_mode_from_pipe_config(struct drm_display_mode *mode, | 1719 | void intel_mode_from_pipe_config(struct drm_display_mode *mode, |
1645 | struct intel_crtc_state *pipe_config); | 1720 | struct intel_crtc_state *pipe_config); |
1646 | void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, | 1721 | void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, |
1647 | struct intel_crtc_state *crtc_state); | 1722 | struct intel_crtc_state *crtc_state); |
1648 | 1723 | ||
1649 | u16 skl_scaler_calc_phase(int sub, bool chroma_center); | 1724 | u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center); |
1650 | int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); | 1725 | int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); |
1651 | int skl_max_scale(const struct intel_crtc_state *crtc_state, | 1726 | int skl_max_scale(const struct intel_crtc_state *crtc_state, |
1652 | u32 pixel_format); | 1727 | u32 pixel_format); |
@@ -1670,6 +1745,24 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane, | |||
1670 | u32 pixel_format, u64 modifier, | 1745 | u32 pixel_format, u64 modifier, |
1671 | unsigned int rotation); | 1746 | unsigned int rotation); |
1672 | 1747 | ||
1748 | /* intel_connector.c */ | ||
1749 | int intel_connector_init(struct intel_connector *connector); | ||
1750 | struct intel_connector *intel_connector_alloc(void); | ||
1751 | void intel_connector_free(struct intel_connector *connector); | ||
1752 | void intel_connector_destroy(struct drm_connector *connector); | ||
1753 | int intel_connector_register(struct drm_connector *connector); | ||
1754 | void intel_connector_unregister(struct drm_connector *connector); | ||
1755 | void intel_connector_attach_encoder(struct intel_connector *connector, | ||
1756 | struct intel_encoder *encoder); | ||
1757 | bool intel_connector_get_hw_state(struct intel_connector *connector); | ||
1758 | enum pipe intel_connector_get_pipe(struct intel_connector *connector); | ||
1759 | int intel_connector_update_modes(struct drm_connector *connector, | ||
1760 | struct edid *edid); | ||
1761 | int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); | ||
1762 | void intel_attach_force_audio_property(struct drm_connector *connector); | ||
1763 | void intel_attach_broadcast_rgb_property(struct drm_connector *connector); | ||
1764 | void intel_attach_aspect_ratio_property(struct drm_connector *connector); | ||
1765 | |||
1673 | /* intel_csr.c */ | 1766 | /* intel_csr.c */ |
1674 | void intel_csr_ucode_init(struct drm_i915_private *); | 1767 | void intel_csr_ucode_init(struct drm_i915_private *); |
1675 | void intel_csr_load_program(struct drm_i915_private *); | 1768 | void intel_csr_load_program(struct drm_i915_private *); |
@@ -1728,9 +1821,6 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, | |||
1728 | unsigned int frontbuffer_bits); | 1821 | unsigned int frontbuffer_bits); |
1729 | void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, | 1822 | void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, |
1730 | unsigned int frontbuffer_bits); | 1823 | unsigned int frontbuffer_bits); |
1731 | void icl_program_mg_dp_mode(struct intel_dp *intel_dp); | ||
1732 | void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port); | ||
1733 | void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port); | ||
1734 | 1824 | ||
1735 | void | 1825 | void |
1736 | intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, | 1826 | intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, |
@@ -1748,6 +1838,10 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp); | |||
1748 | bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp); | 1838 | bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp); |
1749 | bool | 1839 | bool |
1750 | intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]); | 1840 | intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]); |
1841 | uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count, | ||
1842 | int mode_clock, int mode_hdisplay); | ||
1843 | uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock, | ||
1844 | int mode_hdisplay); | ||
1751 | 1845 | ||
1752 | static inline unsigned int intel_dp_unused_lane_mask(int lane_count) | 1846 | static inline unsigned int intel_dp_unused_lane_mask(int lane_count) |
1753 | { | 1847 | { |
@@ -1768,6 +1862,9 @@ void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); | |||
1768 | /* vlv_dsi.c */ | 1862 | /* vlv_dsi.c */ |
1769 | void vlv_dsi_init(struct drm_i915_private *dev_priv); | 1863 | void vlv_dsi_init(struct drm_i915_private *dev_priv); |
1770 | 1864 | ||
1865 | /* icl_dsi.c */ | ||
1866 | void icl_dsi_init(struct drm_i915_private *dev_priv); | ||
1867 | |||
1771 | /* intel_dsi_dcs_backlight.c */ | 1868 | /* intel_dsi_dcs_backlight.c */ |
1772 | int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector); | 1869 | int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector); |
1773 | 1870 | ||
@@ -1858,7 +1955,6 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder, | |||
1858 | void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable); | 1955 | void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable); |
1859 | void intel_infoframe_init(struct intel_digital_port *intel_dig_port); | 1956 | void intel_infoframe_init(struct intel_digital_port *intel_dig_port); |
1860 | 1957 | ||
1861 | |||
1862 | /* intel_lvds.c */ | 1958 | /* intel_lvds.c */ |
1863 | bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, | 1959 | bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, |
1864 | i915_reg_t lvds_reg, enum pipe *pipe); | 1960 | i915_reg_t lvds_reg, enum pipe *pipe); |
@@ -1866,19 +1962,9 @@ void intel_lvds_init(struct drm_i915_private *dev_priv); | |||
1866 | struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev); | 1962 | struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev); |
1867 | bool intel_is_dual_link_lvds(struct drm_device *dev); | 1963 | bool intel_is_dual_link_lvds(struct drm_device *dev); |
1868 | 1964 | ||
1869 | |||
1870 | /* intel_modes.c */ | ||
1871 | int intel_connector_update_modes(struct drm_connector *connector, | ||
1872 | struct edid *edid); | ||
1873 | int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); | ||
1874 | void intel_attach_force_audio_property(struct drm_connector *connector); | ||
1875 | void intel_attach_broadcast_rgb_property(struct drm_connector *connector); | ||
1876 | void intel_attach_aspect_ratio_property(struct drm_connector *connector); | ||
1877 | |||
1878 | |||
1879 | /* intel_overlay.c */ | 1965 | /* intel_overlay.c */ |
1880 | void intel_setup_overlay(struct drm_i915_private *dev_priv); | 1966 | void intel_overlay_setup(struct drm_i915_private *dev_priv); |
1881 | void intel_cleanup_overlay(struct drm_i915_private *dev_priv); | 1967 | void intel_overlay_cleanup(struct drm_i915_private *dev_priv); |
1882 | int intel_overlay_switch_off(struct intel_overlay *overlay); | 1968 | int intel_overlay_switch_off(struct intel_overlay *overlay); |
1883 | int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, | 1969 | int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, |
1884 | struct drm_file *file_priv); | 1970 | struct drm_file *file_priv); |
@@ -1907,7 +1993,6 @@ int intel_panel_setup_backlight(struct drm_connector *connector, | |||
1907 | void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, | 1993 | void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state, |
1908 | const struct drm_connector_state *conn_state); | 1994 | const struct drm_connector_state *conn_state); |
1909 | void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state); | 1995 | void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state); |
1910 | void intel_panel_destroy_backlight(struct drm_connector *connector); | ||
1911 | extern struct drm_display_mode *intel_find_panel_downclock( | 1996 | extern struct drm_display_mode *intel_find_panel_downclock( |
1912 | struct drm_i915_private *dev_priv, | 1997 | struct drm_i915_private *dev_priv, |
1913 | struct drm_display_mode *fixed_mode, | 1998 | struct drm_display_mode *fixed_mode, |
@@ -1936,6 +2021,7 @@ int intel_hdcp_enable(struct intel_connector *connector); | |||
1936 | int intel_hdcp_disable(struct intel_connector *connector); | 2021 | int intel_hdcp_disable(struct intel_connector *connector); |
1937 | int intel_hdcp_check_link(struct intel_connector *connector); | 2022 | int intel_hdcp_check_link(struct intel_connector *connector); |
1938 | bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port); | 2023 | bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port); |
2024 | bool intel_hdcp_capable(struct intel_connector *connector); | ||
1939 | 2025 | ||
1940 | /* intel_psr.c */ | 2026 | /* intel_psr.c */ |
1941 | #define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support) | 2027 | #define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support) |
@@ -1962,11 +2048,16 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp); | |||
1962 | int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, | 2048 | int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, |
1963 | u32 *out_value); | 2049 | u32 *out_value); |
1964 | 2050 | ||
2051 | /* intel_quirks.c */ | ||
2052 | void intel_init_quirks(struct drm_i915_private *dev_priv); | ||
2053 | |||
1965 | /* intel_runtime_pm.c */ | 2054 | /* intel_runtime_pm.c */ |
1966 | int intel_power_domains_init(struct drm_i915_private *); | 2055 | int intel_power_domains_init(struct drm_i915_private *); |
1967 | void intel_power_domains_cleanup(struct drm_i915_private *dev_priv); | 2056 | void intel_power_domains_cleanup(struct drm_i915_private *dev_priv); |
1968 | void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume); | 2057 | void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume); |
1969 | void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv); | 2058 | void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv); |
2059 | void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume); | ||
2060 | void icl_display_core_uninit(struct drm_i915_private *dev_priv); | ||
1970 | void intel_power_domains_enable(struct drm_i915_private *dev_priv); | 2061 | void intel_power_domains_enable(struct drm_i915_private *dev_priv); |
1971 | void intel_power_domains_disable(struct drm_i915_private *dev_priv); | 2062 | void intel_power_domains_disable(struct drm_i915_private *dev_priv); |
1972 | 2063 | ||
@@ -2101,10 +2192,9 @@ int intel_enable_sagv(struct drm_i915_private *dev_priv); | |||
2101 | int intel_disable_sagv(struct drm_i915_private *dev_priv); | 2192 | int intel_disable_sagv(struct drm_i915_private *dev_priv); |
2102 | bool skl_wm_level_equals(const struct skl_wm_level *l1, | 2193 | bool skl_wm_level_equals(const struct skl_wm_level *l1, |
2103 | const struct skl_wm_level *l2); | 2194 | const struct skl_wm_level *l2); |
2104 | bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv, | 2195 | bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, |
2105 | const struct skl_ddb_entry **entries, | 2196 | const struct skl_ddb_entry entries[], |
2106 | const struct skl_ddb_entry *ddb, | 2197 | int num_entries, int ignore_idx); |
2107 | int ignore); | ||
2108 | bool ilk_disable_lp_wm(struct drm_device *dev); | 2198 | bool ilk_disable_lp_wm(struct drm_device *dev); |
2109 | int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, | 2199 | int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, |
2110 | struct intel_crtc_state *cstate); | 2200 | struct intel_crtc_state *cstate); |
@@ -2127,23 +2217,29 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, | |||
2127 | struct drm_file *file_priv); | 2217 | struct drm_file *file_priv); |
2128 | void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state); | 2218 | void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state); |
2129 | void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state); | 2219 | void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state); |
2130 | void skl_update_plane(struct intel_plane *plane, | ||
2131 | const struct intel_crtc_state *crtc_state, | ||
2132 | const struct intel_plane_state *plane_state); | ||
2133 | void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc); | ||
2134 | bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe); | ||
2135 | bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, | ||
2136 | enum pipe pipe, enum plane_id plane_id); | ||
2137 | bool skl_plane_has_planar(struct drm_i915_private *dev_priv, | ||
2138 | enum pipe pipe, enum plane_id plane_id); | ||
2139 | unsigned int skl_plane_max_stride(struct intel_plane *plane, | ||
2140 | u32 pixel_format, u64 modifier, | ||
2141 | unsigned int rotation); | ||
2142 | int skl_plane_check(struct intel_crtc_state *crtc_state, | ||
2143 | struct intel_plane_state *plane_state); | ||
2144 | int intel_plane_check_stride(const struct intel_plane_state *plane_state); | 2220 | int intel_plane_check_stride(const struct intel_plane_state *plane_state); |
2145 | int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state); | 2221 | int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state); |
2146 | int chv_plane_check_rotation(const struct intel_plane_state *plane_state); | 2222 | int chv_plane_check_rotation(const struct intel_plane_state *plane_state); |
2223 | struct intel_plane * | ||
2224 | skl_universal_plane_create(struct drm_i915_private *dev_priv, | ||
2225 | enum pipe pipe, enum plane_id plane_id); | ||
2226 | |||
2227 | static inline bool icl_is_nv12_y_plane(enum plane_id id) | ||
2228 | { | ||
2229 | /* Don't need to do a gen check, these planes are only available on gen11 */ | ||
2230 | if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5) | ||
2231 | return true; | ||
2232 | |||
2233 | return false; | ||
2234 | } | ||
2235 | |||
2236 | static inline bool icl_is_hdr_plane(struct intel_plane *plane) | ||
2237 | { | ||
2238 | if (INTEL_GEN(to_i915(plane->base.dev)) < 11) | ||
2239 | return false; | ||
2240 | |||
2241 | return plane->id < PLANE_SPRITE2; | ||
2242 | } | ||
2147 | 2243 | ||
2148 | /* intel_tv.c */ | 2244 | /* intel_tv.c */ |
2149 | void intel_tv_init(struct drm_i915_private *dev_priv); | 2245 | void intel_tv_init(struct drm_i915_private *dev_priv); |
@@ -2185,11 +2281,16 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, | |||
2185 | struct intel_crtc_state *crtc_state); | 2281 | struct intel_crtc_state *crtc_state); |
2186 | 2282 | ||
2187 | /* intel_atomic_plane.c */ | 2283 | /* intel_atomic_plane.c */ |
2188 | struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane); | 2284 | struct intel_plane *intel_plane_alloc(void); |
2285 | void intel_plane_free(struct intel_plane *plane); | ||
2189 | struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane); | 2286 | struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane); |
2190 | void intel_plane_destroy_state(struct drm_plane *plane, | 2287 | void intel_plane_destroy_state(struct drm_plane *plane, |
2191 | struct drm_plane_state *state); | 2288 | struct drm_plane_state *state); |
2192 | extern const struct drm_plane_helper_funcs intel_plane_helper_funcs; | 2289 | extern const struct drm_plane_helper_funcs intel_plane_helper_funcs; |
2290 | void intel_update_planes_on_crtc(struct intel_atomic_state *old_state, | ||
2291 | struct intel_crtc *crtc, | ||
2292 | struct intel_crtc_state *old_crtc_state, | ||
2293 | struct intel_crtc_state *new_crtc_state); | ||
2193 | int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, | 2294 | int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, |
2194 | struct intel_crtc_state *crtc_state, | 2295 | struct intel_crtc_state *crtc_state, |
2195 | const struct intel_plane_state *old_plane_state, | 2296 | const struct intel_plane_state *old_plane_state, |
@@ -2205,6 +2306,18 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state); | |||
2205 | bool lspcon_init(struct intel_digital_port *intel_dig_port); | 2306 | bool lspcon_init(struct intel_digital_port *intel_dig_port); |
2206 | void lspcon_resume(struct intel_lspcon *lspcon); | 2307 | void lspcon_resume(struct intel_lspcon *lspcon); |
2207 | void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon); | 2308 | void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon); |
2309 | void lspcon_write_infoframe(struct intel_encoder *encoder, | ||
2310 | const struct intel_crtc_state *crtc_state, | ||
2311 | unsigned int type, | ||
2312 | const void *buf, ssize_t len); | ||
2313 | void lspcon_set_infoframes(struct intel_encoder *encoder, | ||
2314 | bool enable, | ||
2315 | const struct intel_crtc_state *crtc_state, | ||
2316 | const struct drm_connector_state *conn_state); | ||
2317 | bool lspcon_infoframe_enabled(struct intel_encoder *encoder, | ||
2318 | const struct intel_crtc_state *pipe_config); | ||
2319 | void lspcon_ycbcr420_config(struct drm_connector *connector, | ||
2320 | struct intel_crtc_state *crtc_state); | ||
2208 | 2321 | ||
2209 | /* intel_pipe_crc.c */ | 2322 | /* intel_pipe_crc.c */ |
2210 | #ifdef CONFIG_DEBUG_FS | 2323 | #ifdef CONFIG_DEBUG_FS |
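The new intel_atomic_get_plane_state() inline above uses the kernel's encoded-error-pointer idiom: the base getter may return an ERR_PTR, which the typed wrapper must pass through with ERR_CAST instead of casting it to the container type. A self-contained userspace sketch with simplified stand-ins for the kernel macros (the real ones live in include/linux/err.h):

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* errors are encoded in the top MAX_ERRNO addresses */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}
#define ERR_CAST(ptr) ((void *)(ptr))

struct drm_plane_state { int dummy; };
struct intel_plane_state { struct drm_plane_state base; };

/* pretend lower-level getter that can fail */
static struct drm_plane_state *get_base_state(int fail)
{
	static struct drm_plane_state s;

	return fail ? ERR_PTR(-12 /* -ENOMEM */) : &s;
}

static struct intel_plane_state *get_typed_state(int fail)
{
	struct drm_plane_state *ret = get_base_state(fail);

	if (IS_ERR(ret))
		return ERR_CAST(ret);	/* propagate the encoded error */

	/* i915 uses a container_of() wrapper here; since base is the
	 * first member, a direct cast stands in for this sketch */
	return (struct intel_plane_state *)ret;
}

int main(void)
{
	printf("ok:  IS_ERR=%d\n", IS_ERR(get_typed_state(0)));
	printf("err: IS_ERR=%d (%ld)\n", IS_ERR(get_typed_state(1)),
	       PTR_ERR(get_typed_state(1)));
	return 0;
}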
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c new file mode 100644 index 000000000000..5fec02aceaed --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dsi.c | |||
@@ -0,0 +1,128 @@ | |||
1 | // SPDX-License-Identifier: MIT | ||
2 | /* | ||
3 | * Copyright © 2018 Intel Corporation | ||
4 | */ | ||
5 | |||
6 | #include <drm/drm_mipi_dsi.h> | ||
7 | #include "intel_dsi.h" | ||
8 | |||
9 | int intel_dsi_bitrate(const struct intel_dsi *intel_dsi) | ||
10 | { | ||
11 | int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); | ||
12 | |||
13 | if (WARN_ON(bpp < 0)) | ||
14 | bpp = 16; | ||
15 | |||
16 | return intel_dsi->pclk * bpp / intel_dsi->lane_count; | ||
17 | } | ||
18 | |||
19 | int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi) | ||
20 | { | ||
21 | switch (intel_dsi->escape_clk_div) { | ||
22 | default: | ||
23 | case 0: | ||
24 | return 50; | ||
25 | case 1: | ||
26 | return 100; | ||
27 | case 2: | ||
28 | return 200; | ||
29 | } | ||
30 | } | ||
31 | |||
32 | int intel_dsi_get_modes(struct drm_connector *connector) | ||
33 | { | ||
34 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
35 | struct drm_display_mode *mode; | ||
36 | |||
37 | DRM_DEBUG_KMS("\n"); | ||
38 | |||
39 | if (!intel_connector->panel.fixed_mode) { | ||
40 | DRM_DEBUG_KMS("no fixed mode\n"); | ||
41 | return 0; | ||
42 | } | ||
43 | |||
44 | mode = drm_mode_duplicate(connector->dev, | ||
45 | intel_connector->panel.fixed_mode); | ||
46 | if (!mode) { | ||
47 | DRM_DEBUG_KMS("drm_mode_duplicate failed\n"); | ||
48 | return 0; | ||
49 | } | ||
50 | |||
51 | drm_mode_probed_add(connector, mode); | ||
52 | return 1; | ||
53 | } | ||
54 | |||
55 | enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, | ||
56 | struct drm_display_mode *mode) | ||
57 | { | ||
58 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
59 | const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; | ||
60 | int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; | ||
61 | |||
62 | DRM_DEBUG_KMS("\n"); | ||
63 | |||
64 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
65 | return MODE_NO_DBLESCAN; | ||
66 | |||
67 | if (fixed_mode) { | ||
68 | if (mode->hdisplay > fixed_mode->hdisplay) | ||
69 | return MODE_PANEL; | ||
70 | if (mode->vdisplay > fixed_mode->vdisplay) | ||
71 | return MODE_PANEL; | ||
72 | if (fixed_mode->clock > max_dotclk) | ||
73 | return MODE_CLOCK_HIGH; | ||
74 | } | ||
75 | |||
76 | return MODE_OK; | ||
77 | } | ||
78 | |||
79 | struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi, | ||
80 | const struct mipi_dsi_host_ops *funcs, | ||
81 | enum port port) | ||
82 | { | ||
83 | struct intel_dsi_host *host; | ||
84 | struct mipi_dsi_device *device; | ||
85 | |||
86 | host = kzalloc(sizeof(*host), GFP_KERNEL); | ||
87 | if (!host) | ||
88 | return NULL; | ||
89 | |||
90 | host->base.ops = funcs; | ||
91 | host->intel_dsi = intel_dsi; | ||
92 | host->port = port; | ||
93 | |||
94 | /* | ||
95 | * We should call mipi_dsi_host_register(&host->base) here, but we don't | ||
96 | * have a host->dev, and we don't have OF stuff either. So just use the | ||
97 | * dsi framework as a library and hope for the best. Create the dsi | ||
98 | * devices by ourselves here too. Need to be careful though, because we | ||
99 | * don't initialize any of the driver model devices here. | ||
100 | */ | ||
101 | device = kzalloc(sizeof(*device), GFP_KERNEL); | ||
102 | if (!device) { | ||
103 | kfree(host); | ||
104 | return NULL; | ||
105 | } | ||
106 | |||
107 | device->host = &host->base; | ||
108 | host->device = device; | ||
109 | |||
110 | return host; | ||
111 | } | ||
112 | |||
113 | enum drm_panel_orientation | ||
114 | intel_dsi_get_panel_orientation(struct intel_connector *connector) | ||
115 | { | ||
116 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | ||
117 | enum drm_panel_orientation orientation; | ||
118 | |||
119 | orientation = dev_priv->vbt.dsi.orientation; | ||
120 | if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) | ||
121 | return orientation; | ||
122 | |||
123 | orientation = dev_priv->vbt.orientation; | ||
124 | if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) | ||
125 | return orientation; | ||
126 | |||
127 | return DRM_MODE_PANEL_ORIENTATION_NORMAL; | ||
128 | } | ||
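intel_dsi_bitrate() in the new file above reduces to pclk * bpp / lane_count per lane. A quick standalone check with illustrative numbers (a 148.5 MHz pixel clock, RGB888, four lanes — not values taken from any particular panel's VBT):

#include <stdio.h>

int main(void)
{
	int pclk = 148500;	/* pixel clock in kHz */
	int bpp = 24;		/* RGB888 */
	int lane_count = 4;

	/* per-lane link rate in kbps, as in intel_dsi_bitrate() */
	int bitrate = pclk * bpp / lane_count;

	printf("%d kbps per lane (%.3f Gbps)\n", bitrate, bitrate / 1e6);
	return 0;
}

For these inputs the result is 891000 kbps (0.891 Gbps) per lane; the WARN_ON(bpp < 0) fallback in the real helper only guards against an unrecognized pixel format.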
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index ad7c1cb32983..ee93137f4433 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h | |||
@@ -81,14 +81,21 @@ struct intel_dsi { | |||
81 | u16 dcs_backlight_ports; | 81 | u16 dcs_backlight_ports; |
82 | u16 dcs_cabc_ports; | 82 | u16 dcs_cabc_ports; |
83 | 83 | ||
84 | /* RGB or BGR */ | ||
85 | bool bgr_enabled; | ||
86 | |||
84 | u8 pixel_overlap; | 87 | u8 pixel_overlap; |
85 | u32 port_bits; | 88 | u32 port_bits; |
86 | u32 bw_timer; | 89 | u32 bw_timer; |
87 | u32 dphy_reg; | 90 | u32 dphy_reg; |
91 | |||
92 | /* data lanes dphy timing */ | ||
93 | u32 dphy_data_lane_reg; | ||
88 | u32 video_frmt_cfg_bits; | 94 | u32 video_frmt_cfg_bits; |
89 | u16 lp_byte_clk; | 95 | u16 lp_byte_clk; |
90 | 96 | ||
91 | /* timeouts in byte clocks */ | 97 | /* timeouts in byte clocks */ |
98 | u16 hs_tx_timeout; | ||
92 | u16 lp_rx_timeout; | 99 | u16 lp_rx_timeout; |
93 | u16 turn_arnd_val; | 100 | u16 turn_arnd_val; |
94 | u16 rst_timer_val; | 101 | u16 rst_timer_val; |
@@ -129,9 +136,31 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) | |||
129 | return container_of(encoder, struct intel_dsi, base.base); | 136 | return container_of(encoder, struct intel_dsi, base.base); |
130 | } | 137 | } |
131 | 138 | ||
139 | static inline bool is_vid_mode(struct intel_dsi *intel_dsi) | ||
140 | { | ||
141 | return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE; | ||
142 | } | ||
143 | |||
144 | static inline bool is_cmd_mode(struct intel_dsi *intel_dsi) | ||
145 | { | ||
146 | return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE; | ||
147 | } | ||
148 | |||
149 | /* intel_dsi.c */ | ||
150 | int intel_dsi_bitrate(const struct intel_dsi *intel_dsi); | ||
151 | int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi); | ||
152 | enum drm_panel_orientation | ||
153 | intel_dsi_get_panel_orientation(struct intel_connector *connector); | ||
154 | |||
132 | /* vlv_dsi.c */ | 155 | /* vlv_dsi.c */ |
133 | void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port); | 156 | void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port); |
134 | enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); | 157 | enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); |
158 | int intel_dsi_get_modes(struct drm_connector *connector); | ||
159 | enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, | ||
160 | struct drm_display_mode *mode); | ||
161 | struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi, | ||
162 | const struct mipi_dsi_host_ops *funcs, | ||
163 | enum port port); | ||
135 | 164 | ||
136 | /* vlv_dsi_pll.c */ | 165 | /* vlv_dsi_pll.c */ |
137 | int vlv_dsi_pll_compute(struct intel_encoder *encoder, | 166 | int vlv_dsi_pll_compute(struct intel_encoder *encoder, |
@@ -158,5 +187,6 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); | |||
158 | int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi); | 187 | int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi); |
159 | void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, | 188 | void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, |
160 | enum mipi_seq seq_id); | 189 | enum mipi_seq seq_id); |
190 | void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec); | ||
161 | 191 | ||
162 | #endif /* _INTEL_DSI_H */ | 192 | #endif /* _INTEL_DSI_H */ |
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c index ac83d6b89ae0..a72de81f4832 100644 --- a/drivers/gpu/drm/i915/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c | |||
@@ -111,6 +111,7 @@ static inline enum port intel_dsi_seq_port_to_port(u8 port) | |||
111 | static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, | 111 | static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, |
112 | const u8 *data) | 112 | const u8 *data) |
113 | { | 113 | { |
114 | struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); | ||
114 | struct mipi_dsi_device *dsi_device; | 115 | struct mipi_dsi_device *dsi_device; |
115 | u8 type, flags, seq_port; | 116 | u8 type, flags, seq_port; |
116 | u16 len; | 117 | u16 len; |
@@ -181,7 +182,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, | |||
181 | break; | 182 | break; |
182 | } | 183 | } |
183 | 184 | ||
184 | vlv_dsi_wait_for_fifo_empty(intel_dsi, port); | 185 | if (!IS_ICELAKE(dev_priv)) |
186 | vlv_dsi_wait_for_fifo_empty(intel_dsi, port); | ||
185 | 187 | ||
186 | out: | 188 | out: |
187 | data += len; | 189 | data += len; |
@@ -481,6 +483,17 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, | |||
481 | } | 483 | } |
482 | } | 484 | } |
483 | 485 | ||
486 | void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec) | ||
487 | { | ||
488 | struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); | ||
489 | |||
490 | /* For v3 VBTs in vid-mode the delays are part of the VBT sequences */ | ||
491 | if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3) | ||
492 | return; | ||
493 | |||
494 | msleep(msec); | ||
495 | } | ||
496 | |||
484 | int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi) | 497 | int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi) |
485 | { | 498 | { |
486 | struct intel_connector *connector = intel_dsi->attached_connector; | 499 | struct intel_connector *connector = intel_dsi->attached_connector; |
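intel_dsi_msleep() above skips the explicit delay whenever the VBT already owns it (video mode with sequence version 3 or newer). A standalone sketch of that guard, with an invented context struct standing in for the driver's private data:

#include <stdio.h>
#include <stdbool.h>

struct dsi_ctx {
	bool video_mode;
	int seq_version;
};

static void dsi_msleep(const struct dsi_ctx *ctx, int msec)
{
	/* v3 video-mode VBTs embed the delays in the sequences themselves */
	if (ctx->video_mode && ctx->seq_version >= 3) {
		printf("skip %d ms (delay owned by VBT sequence)\n", msec);
		return;
	}
	printf("sleep %d ms\n", msec);
}

int main(void)
{
	struct dsi_ctx v2 = { .video_mode = true, .seq_version = 2 };
	struct dsi_ctx v3 = { .video_mode = true, .seq_version = 3 };

	dsi_msleep(&v2, 100);	/* sleeps */
	dsi_msleep(&v3, 100);	/* skipped: sleeping twice would double the delay */
	return 0;
}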
@@ -499,110 +512,125 @@ int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi) | |||
499 | return 1; | 512 | return 1; |
500 | } | 513 | } |
501 | 514 | ||
502 | bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) | 515 | #define ICL_PREPARE_CNT_MAX 0x7 |
516 | #define ICL_CLK_ZERO_CNT_MAX 0xf | ||
517 | #define ICL_TRAIL_CNT_MAX 0x7 | ||
518 | #define ICL_TCLK_PRE_CNT_MAX 0x3 | ||
519 | #define ICL_TCLK_POST_CNT_MAX 0x7 | ||
520 | #define ICL_HS_ZERO_CNT_MAX 0xf | ||
521 | #define ICL_EXIT_ZERO_CNT_MAX 0x7 | ||
522 | |||
523 | static void icl_dphy_param_init(struct intel_dsi *intel_dsi) | ||
503 | { | 524 | { |
504 | struct drm_device *dev = intel_dsi->base.base.dev; | 525 | struct drm_device *dev = intel_dsi->base.base.dev; |
505 | struct drm_i915_private *dev_priv = to_i915(dev); | 526 | struct drm_i915_private *dev_priv = to_i915(dev); |
506 | struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; | 527 | struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; |
507 | struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps; | 528 | u32 tlpx_ns; |
508 | struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode; | ||
509 | u32 bpp; | ||
510 | u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui; | ||
511 | u32 ui_num, ui_den; | ||
512 | u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt; | 529 | u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt; |
513 | u32 ths_prepare_ns, tclk_trail_ns; | 530 | u32 ths_prepare_ns, tclk_trail_ns; |
514 | u32 tclk_prepare_clkzero, ths_prepare_hszero; | 531 | u32 hs_zero_cnt; |
515 | u32 lp_to_hs_switch, hs_to_lp_switch; | 532 | u32 tclk_pre_cnt, tclk_post_cnt; |
516 | u32 pclk, computed_ddr; | ||
517 | u32 mul; | ||
518 | u16 burst_mode_ratio; | ||
519 | enum port port; | ||
520 | 533 | ||
521 | DRM_DEBUG_KMS("\n"); | 534 | tlpx_ns = intel_dsi_tlpx_ns(intel_dsi); |
522 | 535 | ||
523 | intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1; | 536 | tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail); |
524 | intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0; | 537 | ths_prepare_ns = max(mipi_config->ths_prepare, |
525 | intel_dsi->lane_count = mipi_config->lane_cnt + 1; | 538 | mipi_config->tclk_prepare); |
526 | intel_dsi->pixel_format = | ||
527 | pixel_format_from_register_bits( | ||
528 | mipi_config->videomode_color_format << 7); | ||
529 | bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); | ||
530 | |||
531 | intel_dsi->dual_link = mipi_config->dual_link; | ||
532 | intel_dsi->pixel_overlap = mipi_config->pixel_overlap; | ||
533 | intel_dsi->operation_mode = mipi_config->is_cmd_mode; | ||
534 | intel_dsi->video_mode_format = mipi_config->video_transfer_mode; | ||
535 | intel_dsi->escape_clk_div = mipi_config->byte_clk_sel; | ||
536 | intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout; | ||
537 | intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout; | ||
538 | intel_dsi->rst_timer_val = mipi_config->device_reset_timer; | ||
539 | intel_dsi->init_count = mipi_config->master_init_timer; | ||
540 | intel_dsi->bw_timer = mipi_config->dbi_bw_timer; | ||
541 | intel_dsi->video_frmt_cfg_bits = | ||
542 | mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0; | ||
543 | |||
544 | pclk = mode->clock; | ||
545 | 539 | ||
546 | /* In dual link mode each port needs half of pixel clock */ | 540 | /* |
547 | if (intel_dsi->dual_link) { | 541 | * prepare cnt in escape clocks |
548 | pclk = pclk / 2; | 542 | * this field represents a fixed-point value with a precision |
543 | * of 1.2 – i.e. the most significant bit is the integer part | ||
544 | * and the least significant 2 bits are fraction bits, | ||
545 | * so the field can represent a range of 0.25 to 1.75 | ||
546 | */ | ||
547 | prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns); | ||
548 | if (prepare_cnt > ICL_PREPARE_CNT_MAX) { | ||
549 | DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt); | ||
550 | prepare_cnt = ICL_PREPARE_CNT_MAX; | ||
551 | } | ||
549 | 552 | ||
550 | /* we can enable pixel_overlap if needed by panel. In this | 553 | /* clk zero count in escape clocks */ |
551 | * case we need to increase the pixelclock for extra pixels | 554 | clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero - |
552 | */ | 555 | ths_prepare_ns, tlpx_ns); |
553 | if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { | 556 | if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) { |
554 | pclk += DIV_ROUND_UP(mode->vtotal * | 557 | DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt); |
555 | intel_dsi->pixel_overlap * | 558 | clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX; |
556 | 60, 1000); | ||
557 | } | ||
558 | } | 559 | } |
559 | 560 | ||
560 | /* Burst Mode Ratio | 561 | /* trail cnt in escape clocks */ |
561 | * Target ddr frequency from VBT / non burst ddr freq | 562 | trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns); |
562 | * multiply by 100 to preserve remainder | 563 | if (trail_cnt > ICL_TRAIL_CNT_MAX) { |
563 | */ | 564 | DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt); |
564 | if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) { | 565 | trail_cnt = ICL_TRAIL_CNT_MAX; |
565 | if (mipi_config->target_burst_mode_freq) { | 566 | } |
566 | computed_ddr = (pclk * bpp) / intel_dsi->lane_count; | ||
567 | 567 | ||
568 | if (mipi_config->target_burst_mode_freq < | 568 | /* tclk pre count in escape clocks */ |
569 | computed_ddr) { | 569 | tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns); |
570 | DRM_ERROR("Burst mode freq is less than computed\n"); | 570 | if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) { |
571 | return false; | 571 | DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt); |
572 | } | 572 | tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX; |
573 | } | ||
573 | 574 | ||
574 | burst_mode_ratio = DIV_ROUND_UP( | 575 | /* tclk post count in escape clocks */ |
575 | mipi_config->target_burst_mode_freq * 100, | 576 | tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns); |
576 | computed_ddr); | 577 | if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) { |
578 | DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt); | ||
579 | tclk_post_cnt = ICL_TCLK_POST_CNT_MAX; | ||
580 | } | ||
577 | 581 | ||
578 | pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100); | 582 | /* hs zero cnt in escape clocks */ |
579 | } else { | 583 | hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero - |
580 | DRM_ERROR("Burst mode target is not set\n"); | 584 | ths_prepare_ns, tlpx_ns); |
581 | return false; | 585 | if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) { |
582 | } | 586 | DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt); |
583 | } else | 587 | hs_zero_cnt = ICL_HS_ZERO_CNT_MAX; |
584 | burst_mode_ratio = 100; | 588 | } |
585 | 589 | ||
586 | intel_dsi->burst_mode_ratio = burst_mode_ratio; | 590 | /* hs exit zero cnt in escape clocks */ |
587 | intel_dsi->pclk = pclk; | 591 | exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns); |
592 | if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) { | ||
593 | DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt); | ||
594 | exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX; | ||
595 | } | ||
588 | 596 | ||
589 | bitrate = (pclk * bpp) / intel_dsi->lane_count; | 597 | /* clock lane dphy timings */ |
598 | intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE | | ||
599 | CLK_PREPARE(prepare_cnt) | | ||
600 | CLK_ZERO_OVERRIDE | | ||
601 | CLK_ZERO(clk_zero_cnt) | | ||
602 | CLK_PRE_OVERRIDE | | ||
603 | CLK_PRE(tclk_pre_cnt) | | ||
604 | CLK_POST_OVERRIDE | | ||
605 | CLK_POST(tclk_post_cnt) | | ||
606 | CLK_TRAIL_OVERRIDE | | ||
607 | CLK_TRAIL(trail_cnt)); | ||
608 | |||
609 | /* data lanes dphy timings */ | ||
610 | intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE | | ||
611 | HS_PREPARE(prepare_cnt) | | ||
612 | HS_ZERO_OVERRIDE | | ||
613 | HS_ZERO(hs_zero_cnt) | | ||
614 | HS_TRAIL_OVERRIDE | | ||
615 | HS_TRAIL(trail_cnt) | | ||
616 | HS_EXIT_OVERRIDE | | ||
617 | HS_EXIT(exit_zero_cnt)); | ||
618 | } | ||
590 | 619 | ||
591 | switch (intel_dsi->escape_clk_div) { | 620 | static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) |
592 | case 0: | 621 | { |
593 | tlpx_ns = 50; | 622 | struct drm_device *dev = intel_dsi->base.base.dev; |
594 | break; | 623 | struct drm_i915_private *dev_priv = to_i915(dev); |
595 | case 1: | 624 | struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; |
596 | tlpx_ns = 100; | 625 | u32 tlpx_ns, extra_byte_count, tlpx_ui; |
597 | break; | 626 | u32 ui_num, ui_den; |
627 | u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt; | ||
628 | u32 ths_prepare_ns, tclk_trail_ns; | ||
629 | u32 tclk_prepare_clkzero, ths_prepare_hszero; | ||
630 | u32 lp_to_hs_switch, hs_to_lp_switch; | ||
631 | u32 mul; | ||
598 | 632 | ||
599 | case 2: | 633 | tlpx_ns = intel_dsi_tlpx_ns(intel_dsi); |
600 | tlpx_ns = 200; | ||
601 | break; | ||
602 | default: | ||
603 | tlpx_ns = 50; | ||
604 | break; | ||
605 | } | ||
606 | 634 | ||
607 | switch (intel_dsi->lane_count) { | 635 | switch (intel_dsi->lane_count) { |
608 | case 1: | 636 | case 1: |
@@ -620,7 +648,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) | |||
620 | 648 | ||
621 | /* in Kbps */ | 649 | /* in Kbps */ |
622 | ui_num = NS_KHZ_RATIO; | 650 | ui_num = NS_KHZ_RATIO; |
623 | ui_den = bitrate; | 651 | ui_den = intel_dsi_bitrate(intel_dsi); |
624 | 652 | ||
625 | tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero; | 653 | tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero; |
626 | ths_prepare_hszero = mipi_config->ths_prepare_hszero; | 654 | ths_prepare_hszero = mipi_config->ths_prepare_hszero; |
@@ -746,6 +774,88 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) | |||
746 | DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8, | 774 | DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8, |
747 | 8); | 775 | 8); |
748 | intel_dsi->clk_hs_to_lp_count += extra_byte_count; | 776 | intel_dsi->clk_hs_to_lp_count += extra_byte_count; |
777 | } | ||
778 | |||
779 | bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) | ||
780 | { | ||
781 | struct drm_device *dev = intel_dsi->base.base.dev; | ||
782 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
783 | struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; | ||
784 | struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps; | ||
785 | struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode; | ||
786 | u16 burst_mode_ratio; | ||
787 | enum port port; | ||
788 | |||
789 | DRM_DEBUG_KMS("\n"); | ||
790 | |||
791 | intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1; | ||
792 | intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0; | ||
793 | intel_dsi->lane_count = mipi_config->lane_cnt + 1; | ||
794 | intel_dsi->pixel_format = | ||
795 | pixel_format_from_register_bits( | ||
796 | mipi_config->videomode_color_format << 7); | ||
797 | |||
798 | intel_dsi->dual_link = mipi_config->dual_link; | ||
799 | intel_dsi->pixel_overlap = mipi_config->pixel_overlap; | ||
800 | intel_dsi->operation_mode = mipi_config->is_cmd_mode; | ||
801 | intel_dsi->video_mode_format = mipi_config->video_transfer_mode; | ||
802 | intel_dsi->escape_clk_div = mipi_config->byte_clk_sel; | ||
803 | intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout; | ||
804 | intel_dsi->hs_tx_timeout = mipi_config->hs_tx_timeout; | ||
805 | intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout; | ||
806 | intel_dsi->rst_timer_val = mipi_config->device_reset_timer; | ||
807 | intel_dsi->init_count = mipi_config->master_init_timer; | ||
808 | intel_dsi->bw_timer = mipi_config->dbi_bw_timer; | ||
809 | intel_dsi->video_frmt_cfg_bits = | ||
810 | mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0; | ||
811 | intel_dsi->bgr_enabled = mipi_config->rgb_flip; | ||
812 | |||
813 | /* Starting point, adjusted depending on dual link and burst mode */ | ||
814 | intel_dsi->pclk = mode->clock; | ||
815 | |||
816 | /* In dual link mode each port needs half of pixel clock */ | ||
817 | if (intel_dsi->dual_link) { | ||
818 | intel_dsi->pclk /= 2; | ||
819 | |||
820 | /* we can enable pixel_overlap if needed by the panel. In this | ||
821 | * case we need to increase the pixel clock for the extra pixels | ||
822 | */ | ||
823 | if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { | ||
824 | intel_dsi->pclk += DIV_ROUND_UP(mode->vtotal * intel_dsi->pixel_overlap * 60, 1000); | ||
825 | } | ||
826 | } | ||
827 | |||
828 | /* Burst Mode Ratio | ||
829 | * Target ddr frequency from VBT / non burst ddr freq | ||
830 | * multiply by 100 to preserve remainder | ||
831 | */ | ||
832 | if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) { | ||
833 | if (mipi_config->target_burst_mode_freq) { | ||
834 | u32 bitrate = intel_dsi_bitrate(intel_dsi); | ||
835 | |||
836 | if (mipi_config->target_burst_mode_freq < bitrate) { | ||
837 | DRM_ERROR("Burst mode freq is less than computed\n"); | ||
838 | return false; | ||
839 | } | ||
840 | |||
841 | burst_mode_ratio = DIV_ROUND_UP( | ||
842 | mipi_config->target_burst_mode_freq * 100, | ||
843 | bitrate); | ||
844 | |||
845 | intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100); | ||
846 | } else { | ||
847 | DRM_ERROR("Burst mode target is not set\n"); | ||
848 | return false; | ||
849 | } | ||
850 | } else | ||
851 | burst_mode_ratio = 100; | ||
852 | |||
853 | intel_dsi->burst_mode_ratio = burst_mode_ratio; | ||
854 | |||
855 | if (IS_ICELAKE(dev_priv)) | ||
856 | icl_dphy_param_init(intel_dsi); | ||
857 | else | ||
858 | vlv_dphy_param_init(intel_dsi); | ||
749 | 859 | ||
750 | DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk); | 860 | DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk); |
751 | DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap); | 861 | DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap); |
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 4e142ff49708..0042a7f69387 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c | |||
@@ -256,6 +256,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder, | |||
256 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) | 256 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) |
257 | return false; | 257 | return false; |
258 | 258 | ||
259 | pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; | ||
259 | return true; | 260 | return true; |
260 | } | 261 | } |
261 | 262 | ||
@@ -333,18 +334,11 @@ static int intel_dvo_get_modes(struct drm_connector *connector) | |||
333 | return 0; | 334 | return 0; |
334 | } | 335 | } |
335 | 336 | ||
336 | static void intel_dvo_destroy(struct drm_connector *connector) | ||
337 | { | ||
338 | drm_connector_cleanup(connector); | ||
339 | intel_panel_fini(&to_intel_connector(connector)->panel); | ||
340 | kfree(connector); | ||
341 | } | ||
342 | |||
343 | static const struct drm_connector_funcs intel_dvo_connector_funcs = { | 337 | static const struct drm_connector_funcs intel_dvo_connector_funcs = { |
344 | .detect = intel_dvo_detect, | 338 | .detect = intel_dvo_detect, |
345 | .late_register = intel_connector_register, | 339 | .late_register = intel_connector_register, |
346 | .early_unregister = intel_connector_unregister, | 340 | .early_unregister = intel_connector_unregister, |
347 | .destroy = intel_dvo_destroy, | 341 | .destroy = intel_connector_destroy, |
348 | .fill_modes = drm_helper_probe_single_connector_modes, | 342 | .fill_modes = drm_helper_probe_single_connector_modes, |
349 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | 343 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, |
350 | .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, | 344 | .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, |
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 217ed3ee1cab..759c0fd58f8c 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c | |||
@@ -273,13 +273,13 @@ intel_engine_setup(struct drm_i915_private *dev_priv, | |||
273 | BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH)); | 273 | BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH)); |
274 | BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH)); | 274 | BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH)); |
275 | 275 | ||
276 | if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS)) | 276 | if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS)) |
277 | return -EINVAL; | 277 | return -EINVAL; |
278 | 278 | ||
279 | if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE)) | 279 | if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE)) |
280 | return -EINVAL; | 280 | return -EINVAL; |
281 | 281 | ||
282 | if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance])) | 282 | if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance])) |
283 | return -EINVAL; | 283 | return -EINVAL; |
284 | 284 | ||
285 | GEM_BUG_ON(dev_priv->engine[id]); | 285 | GEM_BUG_ON(dev_priv->engine[id]); |
@@ -335,7 +335,10 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv) | |||
335 | 335 | ||
336 | WARN_ON(ring_mask == 0); | 336 | WARN_ON(ring_mask == 0); |
337 | WARN_ON(ring_mask & | 337 | WARN_ON(ring_mask & |
338 | GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES)); | 338 | GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES)); |
339 | |||
340 | if (i915_inject_load_failure()) | ||
341 | return -ENODEV; | ||
339 | 342 | ||
340 | for (i = 0; i < ARRAY_SIZE(intel_engines); i++) { | 343 | for (i = 0; i < ARRAY_SIZE(intel_engines); i++) { |
341 | if (!HAS_ENGINE(dev_priv, i)) | 344 | if (!HAS_ENGINE(dev_priv, i)) |
@@ -399,7 +402,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv) | |||
399 | err = -EINVAL; | 402 | err = -EINVAL; |
400 | err_id = id; | 403 | err_id = id; |
401 | 404 | ||
402 | if (GEM_WARN_ON(!init)) | 405 | if (GEM_DEBUG_WARN_ON(!init)) |
403 | goto cleanup; | 406 | goto cleanup; |
404 | 407 | ||
405 | err = init(engine); | 408 | err = init(engine); |
@@ -463,7 +466,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine) | |||
463 | struct intel_engine_execlists * const execlists = &engine->execlists; | 466 | struct intel_engine_execlists * const execlists = &engine->execlists; |
464 | 467 | ||
465 | execlists->port_mask = 1; | 468 | execlists->port_mask = 1; |
466 | BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists)); | 469 | GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists))); |
467 | GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); | 470 | GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); |
468 | 471 | ||
469 | execlists->queue_priority = INT_MIN; | 472 | execlists->queue_priority = INT_MIN; |
@@ -482,7 +485,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine) | |||
482 | void intel_engine_setup_common(struct intel_engine_cs *engine) | 485 | void intel_engine_setup_common(struct intel_engine_cs *engine) |
483 | { | 486 | { |
484 | i915_timeline_init(engine->i915, &engine->timeline, engine->name); | 487 | i915_timeline_init(engine->i915, &engine->timeline, engine->name); |
485 | lockdep_set_subclass(&engine->timeline.lock, TIMELINE_ENGINE); | 488 | i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE); |
486 | 489 | ||
487 | intel_engine_init_execlist(engine); | 490 | intel_engine_init_execlist(engine); |
488 | intel_engine_init_hangcheck(engine); | 491 | intel_engine_init_hangcheck(engine); |
@@ -809,7 +812,7 @@ u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv) | |||
809 | u32 slice = fls(sseu->slice_mask); | 812 | u32 slice = fls(sseu->slice_mask); |
810 | u32 subslice = fls(sseu->subslice_mask[slice]); | 813 | u32 subslice = fls(sseu->subslice_mask[slice]); |
811 | 814 | ||
812 | if (INTEL_GEN(dev_priv) == 10) | 815 | if (IS_GEN10(dev_priv)) |
813 | mcr_s_ss_select = GEN8_MCR_SLICE(slice) | | 816 | mcr_s_ss_select = GEN8_MCR_SLICE(slice) | |
814 | GEN8_MCR_SUBSLICE(subslice); | 817 | GEN8_MCR_SUBSLICE(subslice); |
815 | else if (INTEL_GEN(dev_priv) >= 11) | 818 | else if (INTEL_GEN(dev_priv) >= 11) |
@@ -1534,10 +1537,10 @@ void intel_engine_dump(struct intel_engine_cs *engine, | |||
1534 | count = 0; | 1537 | count = 0; |
1535 | drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority); | 1538 | drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority); |
1536 | for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { | 1539 | for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { |
1537 | struct i915_priolist *p = | 1540 | struct i915_priolist *p = rb_entry(rb, typeof(*p), node); |
1538 | rb_entry(rb, typeof(*p), node); | 1541 | int i; |
1539 | 1542 | ||
1540 | list_for_each_entry(rq, &p->requests, sched.link) { | 1543 | priolist_for_each_request(rq, p, i) { |
1541 | if (count++ < MAX_REQUESTS_TO_SHOW - 1) | 1544 | if (count++ < MAX_REQUESTS_TO_SHOW - 1) |
1542 | print_request(m, rq, "\t\tQ "); | 1545 | print_request(m, rq, "\t\tQ "); |
1543 | else | 1546 | else |
@@ -1559,8 +1562,10 @@ void intel_engine_dump(struct intel_engine_cs *engine, | |||
1559 | for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { | 1562 | for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) { |
1560 | struct intel_wait *w = rb_entry(rb, typeof(*w), node); | 1563 | struct intel_wait *w = rb_entry(rb, typeof(*w), node); |
1561 | 1564 | ||
1562 | drm_printf(m, "\t%s [%d] waiting for %x\n", | 1565 | drm_printf(m, "\t%s [%d:%c] waiting for %x\n", |
1563 | w->tsk->comm, w->tsk->pid, w->seqno); | 1566 | w->tsk->comm, w->tsk->pid, |
1567 | task_state_to_char(w->tsk), | ||
1568 | w->seqno); | ||
1564 | } | 1569 | } |
1565 | spin_unlock(&b->rb_lock); | 1570 | spin_unlock(&b->rb_lock); |
1566 | local_irq_restore(flags); | 1571 | local_irq_restore(flags); |
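The ring-mask hunk above replaces sizeof(mask) * BITS_PER_BYTE with BITS_PER_TYPE(mask); both spell the bit width of the mask's type, the new form just says so directly. A standalone sketch of the validity check, using a local 32-bit GENMASK() variant and 5 as an assumed stand-in for I915_NUM_ENGINES:

#include <stdio.h>

#define BITS_PER_BYTE    8
#define BITS_PER_TYPE(t) (sizeof(t) * BITS_PER_BYTE)
/* 32-bit flavour of the kernel's GENMASK(); h and l are inclusive */
#define GENMASK32(h, l)  ((~0u << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	unsigned int mask = 0x1f;     /* all five engines present, illustrative */
	unsigned int num_engines = 5; /* assumed I915_NUM_ENGINES */

	/* any bit at or above num_engines makes the ring mask invalid */
	unsigned int invalid = GENMASK32(BITS_PER_TYPE(mask) - 1, num_engines);

	printf("invalid bits %#x, mask ok: %s\n",
	       invalid, (mask & invalid) ? "no" : "yes");
	return 0;
}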
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 74d425c700ef..14cbaf4a0e93 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c | |||
@@ -84,7 +84,7 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv, | |||
84 | int lines; | 84 | int lines; |
85 | 85 | ||
86 | intel_fbc_get_plane_source_size(cache, NULL, &lines); | 86 | intel_fbc_get_plane_source_size(cache, NULL, &lines); |
87 | if (INTEL_GEN(dev_priv) == 7) | 87 | if (IS_GEN7(dev_priv)) |
88 | lines = min(lines, 2048); | 88 | lines = min(lines, 2048); |
89 | else if (INTEL_GEN(dev_priv) >= 8) | 89 | else if (INTEL_GEN(dev_priv) >= 8) |
90 | lines = min(lines, 2560); | 90 | lines = min(lines, 2560); |
@@ -674,6 +674,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, | |||
674 | cache->plane.adjusted_y = plane_state->color_plane[0].y; | 674 | cache->plane.adjusted_y = plane_state->color_plane[0].y; |
675 | cache->plane.y = plane_state->base.src.y1 >> 16; | 675 | cache->plane.y = plane_state->base.src.y1 >> 16; |
676 | 676 | ||
677 | cache->plane.pixel_blend_mode = plane_state->base.pixel_blend_mode; | ||
678 | |||
677 | if (!cache->plane.visible) | 679 | if (!cache->plane.visible) |
678 | return; | 680 | return; |
679 | 681 | ||
@@ -748,6 +750,12 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) | |||
748 | return false; | 750 | return false; |
749 | } | 751 | } |
750 | 752 | ||
753 | if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE && | ||
754 | cache->fb.format->has_alpha) { | ||
755 | fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC"; | ||
756 | return false; | ||
757 | } | ||
758 | |||
751 | /* WaFbcExceedCdClockThreshold:hsw,bdw */ | 759 | /* WaFbcExceedCdClockThreshold:hsw,bdw */ |
752 | if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) && | 760 | if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) && |
753 | cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) { | 761 | cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) { |
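The check added to intel_fbc_can_activate() above disables FBC whenever per-pixel alpha can actually take effect: the blend mode is something other than "None" and the framebuffer format carries an alpha channel. A reduced sketch of just that predicate, using simplified stand-in types rather than the i915 structures:

#include <stdbool.h>
#include <stdio.h>

enum blend_mode { BLEND_PIXEL_NONE, BLEND_PREMULTIPLIED, BLEND_COVERAGE };

struct fb_format {
	bool has_alpha;
};

/* true when the plane cannot blend per pixel, so FBC may stay active */
static bool fbc_blending_ok(enum blend_mode mode, const struct fb_format *fmt)
{
	return mode == BLEND_PIXEL_NONE || !fmt->has_alpha;
}

int main(void)
{
	struct fb_format argb = { .has_alpha = true };

	printf("premultiplied ARGB ok for FBC: %s\n",
	       fbc_blending_ok(BLEND_PREMULTIPLIED, &argb) ? "yes" : "no");
	return 0;
}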
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index f99332972b7a..2480c7d6edee 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
@@ -593,7 +593,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, | |||
593 | * pipe. Note we need to use the selected fb's pitch and bpp | 593 | * pipe. Note we need to use the selected fb's pitch and bpp |
594 | * rather than the current pipe's, since they differ. | 594 | * rather than the current pipe's, since they differ. |
595 | */ | 595 | */ |
596 | cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay; | 596 | cur_size = crtc->state->adjusted_mode.crtc_hdisplay; |
597 | cur_size = cur_size * fb->base.format->cpp[0]; | 597 | cur_size = cur_size * fb->base.format->cpp[0]; |
598 | if (fb->base.pitches[0] < cur_size) { | 598 | if (fb->base.pitches[0] < cur_size) { |
599 | DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n", | 599 | DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n", |
@@ -603,13 +603,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, | |||
603 | break; | 603 | break; |
604 | } | 604 | } |
605 | 605 | ||
606 | cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay; | 606 | cur_size = crtc->state->adjusted_mode.crtc_vdisplay; |
607 | cur_size = intel_fb_align_height(&fb->base, 0, cur_size); | 607 | cur_size = intel_fb_align_height(&fb->base, 0, cur_size); |
608 | cur_size *= fb->base.pitches[0]; | 608 | cur_size *= fb->base.pitches[0]; |
609 | DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n", | 609 | DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n", |
610 | pipe_name(intel_crtc->pipe), | 610 | pipe_name(intel_crtc->pipe), |
611 | intel_crtc->config->base.adjusted_mode.crtc_hdisplay, | 611 | crtc->state->adjusted_mode.crtc_hdisplay, |
612 | intel_crtc->config->base.adjusted_mode.crtc_vdisplay, | 612 | crtc->state->adjusted_mode.crtc_vdisplay, |
613 | fb->base.format->cpp[0] * 8, | 613 | fb->base.format->cpp[0] * 8, |
614 | cur_size); | 614 | cur_size); |
615 | 615 | ||
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index 230aea69385d..8660af3fd755 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c | |||
@@ -50,7 +50,8 @@ void intel_guc_init_send_regs(struct intel_guc *guc) | |||
50 | unsigned int i; | 50 | unsigned int i; |
51 | 51 | ||
52 | guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0)); | 52 | guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0)); |
53 | guc->send_regs.count = SOFT_SCRATCH_COUNT - 1; | 53 | guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN; |
54 | BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT); | ||
54 | 55 | ||
55 | for (i = 0; i < guc->send_regs.count; i++) { | 56 | for (i = 0; i < guc->send_regs.count; i++) { |
56 | fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, | 57 | fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, |
@@ -521,6 +522,44 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset) | |||
521 | return intel_guc_send(guc, action, ARRAY_SIZE(action)); | 522 | return intel_guc_send(guc, action, ARRAY_SIZE(action)); |
522 | } | 523 | } |
523 | 524 | ||
525 | /* | ||
526 | * The ENTER/EXIT_S_STATE actions queue the save/restore operation in GuC FW and | ||
527 | * then return, so waiting on the H2G is not enough to guarantee GuC is done. | ||
528 | * When all the processing is done, GuC writes INTEL_GUC_SLEEP_STATE_SUCCESS to | ||
529 | * scratch register 14, so we can poll on that. Note that GuC does not ensure | ||
530 | * that the value in the register is different from | ||
531 | * INTEL_GUC_SLEEP_STATE_SUCCESS while the action is in progress so we need to | ||
532 | * take care of that ourselves as well. | ||
533 | */ | ||
534 | static int guc_sleep_state_action(struct intel_guc *guc, | ||
535 | const u32 *action, u32 len) | ||
536 | { | ||
537 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | ||
538 | int ret; | ||
539 | u32 status; | ||
540 | |||
541 | I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK); | ||
542 | |||
543 | ret = intel_guc_send(guc, action, len); | ||
544 | if (ret) | ||
545 | return ret; | ||
546 | |||
547 | ret = __intel_wait_for_register(dev_priv, SOFT_SCRATCH(14), | ||
548 | INTEL_GUC_SLEEP_STATE_INVALID_MASK, | ||
549 | 0, 0, 10, &status); | ||
550 | if (ret) | ||
551 | return ret; | ||
552 | |||
553 | if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) { | ||
554 | DRM_ERROR("GuC failed to change sleep state. " | ||
555 | "action=0x%x, err=%u\n", | ||
556 | action[0], status); | ||
557 | return -EIO; | ||
558 | } | ||
559 | |||
560 | return 0; | ||
561 | } | ||
562 | |||
524 | /** | 563 | /** |
525 | * intel_guc_suspend() - notify GuC entering suspend state | 564 | * intel_guc_suspend() - notify GuC entering suspend state |
526 | * @guc: the guc | 565 | * @guc: the guc |
@@ -533,7 +572,7 @@ int intel_guc_suspend(struct intel_guc *guc) | |||
533 | intel_guc_ggtt_offset(guc, guc->shared_data) | 572 | intel_guc_ggtt_offset(guc, guc->shared_data) |
534 | }; | 573 | }; |
535 | 574 | ||
536 | return intel_guc_send(guc, data, ARRAY_SIZE(data)); | 575 | return guc_sleep_state_action(guc, data, ARRAY_SIZE(data)); |
537 | } | 576 | } |
538 | 577 | ||
539 | /** | 578 | /** |
@@ -571,7 +610,7 @@ int intel_guc_resume(struct intel_guc *guc) | |||
571 | intel_guc_ggtt_offset(guc, guc->shared_data) | 610 | intel_guc_ggtt_offset(guc, guc->shared_data) |
572 | }; | 611 | }; |
573 | 612 | ||
574 | return intel_guc_send(guc, data, ARRAY_SIZE(data)); | 613 | return guc_sleep_state_action(guc, data, ARRAY_SIZE(data)); |
575 | } | 614 | } |
576 | 615 | ||
577 | /** | 616 | /** |
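guc_sleep_state_action() above layers a scratch-register handshake over the plain H2G send: seed SOFT_SCRATCH(14) with a value whose top bit GuC never sets, fire the action, then poll until that bit clears and compare the final value against SUCCESS. A user-space sketch of the same protocol; write_scratch14(), read_scratch14() and send_h2g() are hypothetical stand-ins for the MMIO and H2G plumbing:

#include <stdint.h>
#include <errno.h>

#define SLEEP_STATE_INVALID_MASK 0x80000000u
#define SLEEP_STATE_SUCCESS      0x0u

extern void write_scratch14(uint32_t val);  /* hypothetical MMIO write */
extern uint32_t read_scratch14(void);       /* hypothetical MMIO read */
extern int send_h2g(const uint32_t *action, uint32_t len);

static int sleep_state_action(const uint32_t *action, uint32_t len)
{
	int retries = 10000; /* stands in for the bounded 10 ms register wait */
	uint32_t status;
	int ret;

	/* sentinel: GuC never writes a status with the invalid bit set */
	write_scratch14(SLEEP_STATE_INVALID_MASK);

	ret = send_h2g(action, len);
	if (ret)
		return ret;

	/* the H2G only queues the work; completion lands in the scratch reg */
	do {
		status = read_scratch14();
	} while ((status & SLEEP_STATE_INVALID_MASK) && --retries);

	if (status & SLEEP_STATE_INVALID_MASK)
		return -ETIMEDOUT;

	return status == SLEEP_STATE_SUCCESS ? 0 : -EIO;
}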
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index ad42faf48c46..0f1c4f9ebfd8 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h | |||
@@ -95,6 +95,11 @@ struct intel_guc { | |||
95 | void (*notify)(struct intel_guc *guc); | 95 | void (*notify)(struct intel_guc *guc); |
96 | }; | 96 | }; |
97 | 97 | ||
98 | static inline bool intel_guc_is_alive(struct intel_guc *guc) | ||
99 | { | ||
100 | return intel_uc_fw_is_loaded(&guc->fw); | ||
101 | } | ||
102 | |||
98 | static | 103 | static |
99 | inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) | 104 | inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) |
100 | { | 105 | { |
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c index a9e6fcce467c..a67144ee5ceb 100644 --- a/drivers/gpu/drm/i915/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/intel_guc_fw.c | |||
@@ -78,7 +78,8 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw) | |||
78 | guc_fw->major_ver_wanted = KBL_FW_MAJOR; | 78 | guc_fw->major_ver_wanted = KBL_FW_MAJOR; |
79 | guc_fw->minor_ver_wanted = KBL_FW_MINOR; | 79 | guc_fw->minor_ver_wanted = KBL_FW_MINOR; |
80 | } else { | 80 | } else { |
81 | DRM_WARN("%s: No firmware known for this platform!\n", | 81 | dev_info(dev_priv->drm.dev, |
82 | "%s: No firmware known for this platform!\n", | ||
82 | intel_uc_fw_type_repr(guc_fw->type)); | 83 | intel_uc_fw_type_repr(guc_fw->type)); |
83 | } | 84 | } |
84 | } | 85 | } |
@@ -125,66 +126,26 @@ static void guc_prepare_xfer(struct intel_guc *guc) | |||
125 | } | 126 | } |
126 | 127 | ||
127 | /* Copy RSA signature from the fw image to HW for verification */ | 128 | /* Copy RSA signature from the fw image to HW for verification */ |
128 | static int guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma) | 129 | static void guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma) |
129 | { | 130 | { |
130 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | 131 | struct drm_i915_private *dev_priv = guc_to_i915(guc); |
131 | struct intel_uc_fw *guc_fw = &guc->fw; | ||
132 | struct sg_table *sg = vma->pages; | ||
133 | u32 rsa[UOS_RSA_SCRATCH_COUNT]; | 132 | u32 rsa[UOS_RSA_SCRATCH_COUNT]; |
134 | int i; | 133 | int i; |
135 | 134 | ||
136 | if (sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), | 135 | sg_pcopy_to_buffer(vma->pages->sgl, vma->pages->nents, |
137 | guc_fw->rsa_offset) != sizeof(rsa)) | 136 | rsa, sizeof(rsa), guc->fw.rsa_offset); |
138 | return -EINVAL; | ||
139 | 137 | ||
140 | for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++) | 138 | for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++) |
141 | I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]); | 139 | I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]); |
142 | |||
143 | return 0; | ||
144 | } | 140 | } |
145 | 141 | ||
146 | /* | 142 | static bool guc_xfer_completed(struct intel_guc *guc, u32 *status) |
147 | * Transfer the firmware image to RAM for execution by the microcontroller. | ||
148 | * | ||
149 | * Architecturally, the DMA engine is bidirectional, and can potentially even | ||
150 | * transfer between GTT locations. This functionality is left out of the API | ||
151 | * for now as there is no need for it. | ||
152 | */ | ||
153 | static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma) | ||
154 | { | 143 | { |
155 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | 144 | struct drm_i915_private *dev_priv = guc_to_i915(guc); |
156 | struct intel_uc_fw *guc_fw = &guc->fw; | ||
157 | unsigned long offset; | ||
158 | u32 status; | ||
159 | int ret; | ||
160 | |||
161 | /* | ||
162 | * The header plus uCode will be copied to WOPCM via DMA, excluding any | ||
163 | * other components | ||
164 | */ | ||
165 | I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size); | ||
166 | |||
167 | /* Set the source address for the new blob */ | ||
168 | offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset; | ||
169 | I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); | ||
170 | I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); | ||
171 | 145 | ||
172 | /* | 146 | /* Did we complete the xfer? */ |
173 | * Set the DMA destination. Current uCode expects the code to be | 147 | *status = I915_READ(DMA_CTRL); |
174 | * loaded at 8k; locations below this are used for the stack. | 148 | return !(*status & START_DMA); |
175 | */ | ||
176 | I915_WRITE(DMA_ADDR_1_LOW, 0x2000); | ||
177 | I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); | ||
178 | |||
179 | /* Finally start the DMA */ | ||
180 | I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA)); | ||
181 | |||
182 | /* Wait for DMA to finish */ | ||
183 | ret = __intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0, | ||
184 | 2, 100, &status); | ||
185 | DRM_DEBUG_DRIVER("GuC DMA status %#x\n", status); | ||
186 | |||
187 | return ret; | ||
188 | } | 149 | } |
189 | 150 | ||
190 | /* | 151 | /* |
@@ -217,8 +178,8 @@ static int guc_wait_ucode(struct intel_guc *guc) | |||
217 | * NB: Docs recommend not using the interrupt for completion. | 178 | * NB: Docs recommend not using the interrupt for completion. |
218 | * Measurements indicate this should take no more than 20ms, so a | 179 | * Measurements indicate this should take no more than 20ms, so a |
219 | * timeout here indicates that the GuC has failed and is unusable. | 180 | * timeout here indicates that the GuC has failed and is unusable. |
220 | * (Higher levels of the driver will attempt to fall back to | 181 | * (Higher levels of the driver may decide to reset the GuC and |
221 | * execlist mode if this happens.) | 182 | * attempt the ucode load again if this happens.) |
222 | */ | 183 | */ |
223 | ret = wait_for(guc_ready(guc, &status), 100); | 184 | ret = wait_for(guc_ready(guc, &status), 100); |
224 | DRM_DEBUG_DRIVER("GuC status %#x\n", status); | 185 | DRM_DEBUG_DRIVER("GuC status %#x\n", status); |
@@ -228,10 +189,52 @@ static int guc_wait_ucode(struct intel_guc *guc) | |||
228 | ret = -ENOEXEC; | 189 | ret = -ENOEXEC; |
229 | } | 190 | } |
230 | 191 | ||
192 | if (ret == 0 && !guc_xfer_completed(guc, &status)) { | ||
193 | DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n", | ||
194 | status); | ||
195 | ret = -ENXIO; | ||
196 | } | ||
197 | |||
231 | return ret; | 198 | return ret; |
232 | } | 199 | } |
233 | 200 | ||
234 | /* | 201 | /* |
202 | * Transfer the firmware image to RAM for execution by the microcontroller. | ||
203 | * | ||
204 | * Architecturally, the DMA engine is bidirectional, and can potentially even | ||
205 | * transfer between GTT locations. This functionality is left out of the API | ||
206 | * for now as there is no need for it. | ||
207 | */ | ||
208 | static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma) | ||
209 | { | ||
210 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | ||
211 | struct intel_uc_fw *guc_fw = &guc->fw; | ||
212 | unsigned long offset; | ||
213 | |||
214 | /* | ||
215 | * The header plus uCode will be copied to WOPCM via DMA, excluding any | ||
216 | * other components | ||
217 | */ | ||
218 | I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size); | ||
219 | |||
220 | /* Set the source address for the new blob */ | ||
221 | offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset; | ||
222 | I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); | ||
223 | I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); | ||
224 | |||
225 | /* | ||
226 | * Set the DMA destination. Current uCode expects the code to be | ||
227 | * loaded at 8k; locations below this are used for the stack. | ||
228 | */ | ||
229 | I915_WRITE(DMA_ADDR_1_LOW, 0x2000); | ||
230 | I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); | ||
231 | |||
232 | /* Finally start the DMA */ | ||
233 | I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA)); | ||
234 | |||
235 | return guc_wait_ucode(guc); | ||
236 | } | ||
237 | /* | ||
235 | * Load the GuC firmware blob into the MinuteIA. | 238 | * Load the GuC firmware blob into the MinuteIA. |
236 | */ | 239 | */ |
237 | static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma) | 240 | static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma) |
@@ -251,17 +254,9 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma) | |||
251 | * by the DMA engine in one operation, whereas the RSA signature is | 254 | * by the DMA engine in one operation, whereas the RSA signature is |
252 | * loaded via MMIO. | 255 | * loaded via MMIO. |
253 | */ | 256 | */ |
254 | ret = guc_xfer_rsa(guc, vma); | 257 | guc_xfer_rsa(guc, vma); |
255 | if (ret) | ||
256 | DRM_WARN("GuC firmware signature xfer error %d\n", ret); | ||
257 | 258 | ||
258 | ret = guc_xfer_ucode(guc, vma); | 259 | ret = guc_xfer_ucode(guc, vma); |
259 | if (ret) | ||
260 | DRM_WARN("GuC firmware code xfer error %d\n", ret); | ||
261 | |||
262 | ret = guc_wait_ucode(guc); | ||
263 | if (ret) | ||
264 | DRM_ERROR("GuC firmware xfer error %d\n", ret); | ||
265 | 260 | ||
266 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); | 261 | intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); |
267 | 262 | ||
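The reshuffled load path above folds the DMA kick and the readiness wait into guc_xfer_ucode() and adds one extra sanity check: even after GuC reports ready, DMA_CTRL must no longer have START_DMA set, otherwise the load fails with -ENXIO. A sketch of that final check; read_dma_ctrl() is a hypothetical register read and the bit position is illustrative:

#include <stdint.h>
#include <stdbool.h>
#include <errno.h>

#define START_DMA (1u << 0) /* illustrative bit position */

extern uint32_t read_dma_ctrl(void); /* hypothetical MMIO read */

static bool xfer_completed(uint32_t *status)
{
	*status = read_dma_ctrl();
	return !(*status & START_DMA);
}

static int check_load_result(int wait_ret)
{
	uint32_t status;

	if (wait_ret == 0 && !xfer_completed(&status))
		return -ENXIO; /* firmware says ready, but the DMA never finished */

	return wait_ret;
}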
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 8382d591c784..b2f5148f4f17 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h | |||
@@ -39,6 +39,11 @@ | |||
39 | #define GUC_VIDEO_ENGINE2 4 | 39 | #define GUC_VIDEO_ENGINE2 4 |
40 | #define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1) | 40 | #define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1) |
41 | 41 | ||
42 | #define GUC_DOORBELL_INVALID 256 | ||
43 | |||
44 | #define GUC_DB_SIZE (PAGE_SIZE) | ||
45 | #define GUC_WQ_SIZE (PAGE_SIZE * 2) | ||
46 | |||
42 | /* Work queue item header definitions */ | 47 | /* Work queue item header definitions */ |
43 | #define WQ_STATUS_ACTIVE 1 | 48 | #define WQ_STATUS_ACTIVE 1 |
44 | #define WQ_STATUS_SUSPENDED 2 | 49 | #define WQ_STATUS_SUSPENDED 2 |
@@ -59,9 +64,6 @@ | |||
59 | #define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */ | 64 | #define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */ |
60 | #define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT) | 65 | #define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT) |
61 | 66 | ||
62 | #define GUC_DOORBELL_ENABLED 1 | ||
63 | #define GUC_DOORBELL_DISABLED 0 | ||
64 | |||
65 | #define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0) | 67 | #define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0) |
66 | #define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1) | 68 | #define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1) |
67 | #define GUC_STAGE_DESC_ATTR_KERNEL BIT(2) | 69 | #define GUC_STAGE_DESC_ATTR_KERNEL BIT(2) |
@@ -219,26 +221,6 @@ struct uc_css_header { | |||
219 | u32 header_info; | 221 | u32 header_info; |
220 | } __packed; | 222 | } __packed; |
221 | 223 | ||
222 | struct guc_doorbell_info { | ||
223 | u32 db_status; | ||
224 | u32 cookie; | ||
225 | u32 reserved[14]; | ||
226 | } __packed; | ||
227 | |||
228 | union guc_doorbell_qw { | ||
229 | struct { | ||
230 | u32 db_status; | ||
231 | u32 cookie; | ||
232 | }; | ||
233 | u64 value_qw; | ||
234 | } __packed; | ||
235 | |||
236 | #define GUC_NUM_DOORBELLS 256 | ||
237 | #define GUC_DOORBELL_INVALID (GUC_NUM_DOORBELLS) | ||
238 | |||
239 | #define GUC_DB_SIZE (PAGE_SIZE) | ||
240 | #define GUC_WQ_SIZE (PAGE_SIZE * 2) | ||
241 | |||
242 | /* Work item for submitting workloads into work queue of GuC. */ | 224 | /* Work item for submitting workloads into work queue of GuC. */ |
243 | struct guc_wq_item { | 225 | struct guc_wq_item { |
244 | u32 header; | 226 | u32 header; |
@@ -601,7 +583,9 @@ struct guc_shared_ctx_data { | |||
601 | * registers, where first register holds data treated as message header, | 583 | * registers, where first register holds data treated as message header, |
602 | * and other registers are used to hold message payload. | 584 | * and other registers are used to hold message payload. |
603 | * | 585 | * |
604 | * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8 | 586 | * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8, |
587 | * but no H2G command takes more than 8 parameters and the GuC FW | ||
588 | * itself uses an 8-element array to store the H2G message. | ||
605 | * | 589 | * |
606 | * +-----------+---------+---------+---------+ | 590 | * +-----------+---------+---------+---------+ |
607 | * | MMIO[0] | MMIO[1] | ... | MMIO[n] | | 591 | * | MMIO[0] | MMIO[1] | ... | MMIO[n] | |
@@ -633,6 +617,8 @@ struct guc_shared_ctx_data { | |||
633 | * field. | 617 | * field. |
634 | */ | 618 | */ |
635 | 619 | ||
620 | #define GUC_MAX_MMIO_MSG_LEN 8 | ||
621 | |||
636 | #define INTEL_GUC_MSG_TYPE_SHIFT 28 | 622 | #define INTEL_GUC_MSG_TYPE_SHIFT 28 |
637 | #define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT) | 623 | #define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT) |
638 | #define INTEL_GUC_MSG_DATA_SHIFT 16 | 624 | #define INTEL_GUC_MSG_DATA_SHIFT 16 |
@@ -687,6 +673,13 @@ enum intel_guc_report_status { | |||
687 | INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4, | 673 | INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4, |
688 | }; | 674 | }; |
689 | 675 | ||
676 | enum intel_guc_sleep_state_status { | ||
677 | INTEL_GUC_SLEEP_STATE_SUCCESS = 0x0, | ||
678 | INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x1, | ||
679 | INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x2 | ||
680 | #define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000 | ||
681 | }; | ||
682 | |||
690 | #define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0) | 683 | #define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0) |
691 | #define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4 | 684 | #define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4 |
692 | #define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT) | 685 | #define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT) |
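GUC_MAX_MMIO_MSG_LEN above caps an H2G message at 8 scratch registers, and the matching intel_guc.c hunk pins send_regs.count to it behind a BUILD_BUG_ON() against SOFT_SCRATCH_COUNT. The same guard expressed in portable C11; the scratch-register count here is an assumption for illustration:

#define SOFT_SCRATCH_COUNT   16 /* assumed; the real value lives in i915_reg.h */
#define GUC_MAX_MMIO_MSG_LEN 8

/* compile-time guard, mirroring the kernel's BUILD_BUG_ON() */
_Static_assert(GUC_MAX_MMIO_MSG_LEN <= SOFT_SCRATCH_COUNT,
	       "H2G message cannot use more scratch registers than exist");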
diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h index d86084742a4a..57e7ad522c2f 100644 --- a/drivers/gpu/drm/i915/intel_guc_reg.h +++ b/drivers/gpu/drm/i915/intel_guc_reg.h | |||
@@ -104,6 +104,18 @@ | |||
104 | #define GUC_SEND_INTERRUPT _MMIO(0xc4c8) | 104 | #define GUC_SEND_INTERRUPT _MMIO(0xc4c8) |
105 | #define GUC_SEND_TRIGGER (1<<0) | 105 | #define GUC_SEND_TRIGGER (1<<0) |
106 | 106 | ||
107 | #define GUC_NUM_DOORBELLS 256 | ||
108 | |||
109 | /* format of the HW-monitored doorbell cacheline */ | ||
110 | struct guc_doorbell_info { | ||
111 | u32 db_status; | ||
112 | #define GUC_DOORBELL_DISABLED 0 | ||
113 | #define GUC_DOORBELL_ENABLED 1 | ||
114 | |||
115 | u32 cookie; | ||
116 | u32 reserved[14]; | ||
117 | } __packed; | ||
118 | |||
107 | #define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8) | 119 | #define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8) |
108 | #define GEN8_DRB_VALID (1<<0) | 120 | #define GEN8_DRB_VALID (1<<0) |
109 | #define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4) | 121 | #define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4) |
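The relocated guc_doorbell_info describes the single cacheline the doorbell hardware monitors: two live words plus padding out to 64 bytes. A standalone size check of that layout, assuming the field widths shown in the patch:

#include <stdint.h>

struct guc_doorbell_info {
	uint32_t db_status;    /* GUC_DOORBELL_DISABLED / GUC_DOORBELL_ENABLED */
	uint32_t cookie;
	uint32_t reserved[14]; /* pad out to a full cacheline */
} __attribute__((packed));

_Static_assert(sizeof(struct guc_doorbell_info) == 64,
	       "doorbell info must cover exactly one 64-byte cacheline");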
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index a81f04d46e87..1570dcbe249c 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c | |||
@@ -192,7 +192,15 @@ static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client) | |||
192 | return client->vaddr + client->doorbell_offset; | 192 | return client->vaddr + client->doorbell_offset; |
193 | } | 193 | } |
194 | 194 | ||
195 | static void __create_doorbell(struct intel_guc_client *client) | 195 | static bool __doorbell_valid(struct intel_guc *guc, u16 db_id) |
196 | { | ||
197 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | ||
198 | |||
199 | GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS); | ||
200 | return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID; | ||
201 | } | ||
202 | |||
203 | static void __init_doorbell(struct intel_guc_client *client) | ||
196 | { | 204 | { |
197 | struct guc_doorbell_info *doorbell; | 205 | struct guc_doorbell_info *doorbell; |
198 | 206 | ||
@@ -201,21 +209,19 @@ static void __create_doorbell(struct intel_guc_client *client) | |||
201 | doorbell->cookie = 0; | 209 | doorbell->cookie = 0; |
202 | } | 210 | } |
203 | 211 | ||
204 | static void __destroy_doorbell(struct intel_guc_client *client) | 212 | static void __fini_doorbell(struct intel_guc_client *client) |
205 | { | 213 | { |
206 | struct drm_i915_private *dev_priv = guc_to_i915(client->guc); | ||
207 | struct guc_doorbell_info *doorbell; | 214 | struct guc_doorbell_info *doorbell; |
208 | u16 db_id = client->doorbell_id; | 215 | u16 db_id = client->doorbell_id; |
209 | 216 | ||
210 | doorbell = __get_doorbell(client); | 217 | doorbell = __get_doorbell(client); |
211 | doorbell->db_status = GUC_DOORBELL_DISABLED; | 218 | doorbell->db_status = GUC_DOORBELL_DISABLED; |
212 | doorbell->cookie = 0; | ||
213 | 219 | ||
214 | /* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit | 220 | /* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit |
215 | * to go to zero after updating db_status before we call the GuC to | 221 | * to go to zero after updating db_status before we call the GuC to |
216 | * release the doorbell | 222 | * release the doorbell |
217 | */ | 223 | */ |
218 | if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10)) | 224 | if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10)) |
219 | WARN_ONCE(true, "Doorbell never became invalid after disable\n"); | 225 | WARN_ONCE(true, "Doorbell never became invalid after disable\n"); |
220 | } | 226 | } |
221 | 227 | ||
@@ -227,11 +233,11 @@ static int create_doorbell(struct intel_guc_client *client) | |||
227 | return -ENODEV; /* internal setup error, should never happen */ | 233 | return -ENODEV; /* internal setup error, should never happen */ |
228 | 234 | ||
229 | __update_doorbell_desc(client, client->doorbell_id); | 235 | __update_doorbell_desc(client, client->doorbell_id); |
230 | __create_doorbell(client); | 236 | __init_doorbell(client); |
231 | 237 | ||
232 | ret = __guc_allocate_doorbell(client->guc, client->stage_id); | 238 | ret = __guc_allocate_doorbell(client->guc, client->stage_id); |
233 | if (ret) { | 239 | if (ret) { |
234 | __destroy_doorbell(client); | 240 | __fini_doorbell(client); |
235 | __update_doorbell_desc(client, GUC_DOORBELL_INVALID); | 241 | __update_doorbell_desc(client, GUC_DOORBELL_INVALID); |
236 | DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n", | 242 | DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n", |
237 | client->stage_id, ret); | 243 | client->stage_id, ret); |
@@ -247,7 +253,7 @@ static int destroy_doorbell(struct intel_guc_client *client) | |||
247 | 253 | ||
248 | GEM_BUG_ON(!has_doorbell(client)); | 254 | GEM_BUG_ON(!has_doorbell(client)); |
249 | 255 | ||
250 | __destroy_doorbell(client); | 256 | __fini_doorbell(client); |
251 | ret = __guc_deallocate_doorbell(client->guc, client->stage_id); | 257 | ret = __guc_deallocate_doorbell(client->guc, client->stage_id); |
252 | if (ret) | 258 | if (ret) |
253 | DRM_ERROR("Couldn't destroy client %u doorbell: %d\n", | 259 | DRM_ERROR("Couldn't destroy client %u doorbell: %d\n", |
@@ -282,8 +288,7 @@ __get_process_desc(struct intel_guc_client *client) | |||
282 | /* | 288 | /* |
283 | * Initialise the process descriptor shared with the GuC firmware. | 289 | * Initialise the process descriptor shared with the GuC firmware. |
284 | */ | 290 | */ |
285 | static void guc_proc_desc_init(struct intel_guc *guc, | 291 | static void guc_proc_desc_init(struct intel_guc_client *client) |
286 | struct intel_guc_client *client) | ||
287 | { | 292 | { |
288 | struct guc_process_desc *desc; | 293 | struct guc_process_desc *desc; |
289 | 294 | ||
@@ -304,6 +309,14 @@ static void guc_proc_desc_init(struct intel_guc *guc, | |||
304 | desc->priority = client->priority; | 309 | desc->priority = client->priority; |
305 | } | 310 | } |
306 | 311 | ||
312 | static void guc_proc_desc_fini(struct intel_guc_client *client) | ||
313 | { | ||
314 | struct guc_process_desc *desc; | ||
315 | |||
316 | desc = __get_process_desc(client); | ||
317 | memset(desc, 0, sizeof(*desc)); | ||
318 | } | ||
319 | |||
307 | static int guc_stage_desc_pool_create(struct intel_guc *guc) | 320 | static int guc_stage_desc_pool_create(struct intel_guc *guc) |
308 | { | 321 | { |
309 | struct i915_vma *vma; | 322 | struct i915_vma *vma; |
@@ -341,9 +354,9 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc) | |||
341 | * data structures relating to this client (doorbell, process descriptor, | 354 | * data structures relating to this client (doorbell, process descriptor, |
342 | * write queue, etc). | 355 | * write queue, etc). |
343 | */ | 356 | */ |
344 | static void guc_stage_desc_init(struct intel_guc *guc, | 357 | static void guc_stage_desc_init(struct intel_guc_client *client) |
345 | struct intel_guc_client *client) | ||
346 | { | 358 | { |
359 | struct intel_guc *guc = client->guc; | ||
347 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | 360 | struct drm_i915_private *dev_priv = guc_to_i915(guc); |
348 | struct intel_engine_cs *engine; | 361 | struct intel_engine_cs *engine; |
349 | struct i915_gem_context *ctx = client->owner; | 362 | struct i915_gem_context *ctx = client->owner; |
@@ -424,8 +437,7 @@ static void guc_stage_desc_init(struct intel_guc *guc, | |||
424 | desc->desc_private = ptr_to_u64(client); | 437 | desc->desc_private = ptr_to_u64(client); |
425 | } | 438 | } |
426 | 439 | ||
427 | static void guc_stage_desc_fini(struct intel_guc *guc, | 440 | static void guc_stage_desc_fini(struct intel_guc_client *client) |
428 | struct intel_guc_client *client) | ||
429 | { | 441 | { |
430 | struct guc_stage_desc *desc; | 442 | struct guc_stage_desc *desc; |
431 | 443 | ||
@@ -486,14 +498,6 @@ static void guc_wq_item_append(struct intel_guc_client *client, | |||
486 | WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1)); | 498 | WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1)); |
487 | } | 499 | } |
488 | 500 | ||
489 | static void guc_reset_wq(struct intel_guc_client *client) | ||
490 | { | ||
491 | struct guc_process_desc *desc = __get_process_desc(client); | ||
492 | |||
493 | desc->head = 0; | ||
494 | desc->tail = 0; | ||
495 | } | ||
496 | |||
497 | static void guc_ring_doorbell(struct intel_guc_client *client) | 501 | static void guc_ring_doorbell(struct intel_guc_client *client) |
498 | { | 502 | { |
499 | struct guc_doorbell_info *db; | 503 | struct guc_doorbell_info *db; |
@@ -746,30 +750,28 @@ static bool __guc_dequeue(struct intel_engine_cs *engine) | |||
746 | while ((rb = rb_first_cached(&execlists->queue))) { | 750 | while ((rb = rb_first_cached(&execlists->queue))) { |
747 | struct i915_priolist *p = to_priolist(rb); | 751 | struct i915_priolist *p = to_priolist(rb); |
748 | struct i915_request *rq, *rn; | 752 | struct i915_request *rq, *rn; |
753 | int i; | ||
749 | 754 | ||
750 | list_for_each_entry_safe(rq, rn, &p->requests, sched.link) { | 755 | priolist_for_each_request_consume(rq, rn, p, i) { |
751 | if (last && rq->hw_context != last->hw_context) { | 756 | if (last && rq->hw_context != last->hw_context) { |
752 | if (port == last_port) { | 757 | if (port == last_port) |
753 | __list_del_many(&p->requests, | ||
754 | &rq->sched.link); | ||
755 | goto done; | 758 | goto done; |
756 | } | ||
757 | 759 | ||
758 | if (submit) | 760 | if (submit) |
759 | port_assign(port, last); | 761 | port_assign(port, last); |
760 | port++; | 762 | port++; |
761 | } | 763 | } |
762 | 764 | ||
763 | INIT_LIST_HEAD(&rq->sched.link); | 765 | list_del_init(&rq->sched.link); |
764 | 766 | ||
765 | __i915_request_submit(rq); | 767 | __i915_request_submit(rq); |
766 | trace_i915_request_in(rq, port_index(port, execlists)); | 768 | trace_i915_request_in(rq, port_index(port, execlists)); |
769 | |||
767 | last = rq; | 770 | last = rq; |
768 | submit = true; | 771 | submit = true; |
769 | } | 772 | } |
770 | 773 | ||
771 | rb_erase_cached(&p->node, &execlists->queue); | 774 | rb_erase_cached(&p->node, &execlists->queue); |
772 | INIT_LIST_HEAD(&p->requests); | ||
773 | if (p->priority != I915_PRIORITY_NORMAL) | 775 | if (p->priority != I915_PRIORITY_NORMAL) |
774 | kmem_cache_free(engine->i915->priorities, p); | 776 | kmem_cache_free(engine->i915->priorities, p); |
775 | } | 777 | } |
@@ -791,19 +793,8 @@ done: | |||
791 | 793 | ||
792 | static void guc_dequeue(struct intel_engine_cs *engine) | 794 | static void guc_dequeue(struct intel_engine_cs *engine) |
793 | { | 795 | { |
794 | unsigned long flags; | 796 | if (__guc_dequeue(engine)) |
795 | bool submit; | ||
796 | |||
797 | local_irq_save(flags); | ||
798 | |||
799 | spin_lock(&engine->timeline.lock); | ||
800 | submit = __guc_dequeue(engine); | ||
801 | spin_unlock(&engine->timeline.lock); | ||
802 | |||
803 | if (submit) | ||
804 | guc_submit(engine); | 797 | guc_submit(engine); |
805 | |||
806 | local_irq_restore(flags); | ||
807 | } | 798 | } |
808 | 799 | ||
809 | static void guc_submission_tasklet(unsigned long data) | 800 | static void guc_submission_tasklet(unsigned long data) |
@@ -812,6 +803,9 @@ static void guc_submission_tasklet(unsigned long data) | |||
812 | struct intel_engine_execlists * const execlists = &engine->execlists; | 803 | struct intel_engine_execlists * const execlists = &engine->execlists; |
813 | struct execlist_port *port = execlists->port; | 804 | struct execlist_port *port = execlists->port; |
814 | struct i915_request *rq; | 805 | struct i915_request *rq; |
806 | unsigned long flags; | ||
807 | |||
808 | spin_lock_irqsave(&engine->timeline.lock, flags); | ||
815 | 809 | ||
816 | rq = port_request(port); | 810 | rq = port_request(port); |
817 | while (rq && i915_request_completed(rq)) { | 811 | while (rq && i915_request_completed(rq)) { |
@@ -835,6 +829,8 @@ static void guc_submission_tasklet(unsigned long data) | |||
835 | 829 | ||
836 | if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)) | 830 | if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT)) |
837 | guc_dequeue(engine); | 831 | guc_dequeue(engine); |
832 | |||
833 | spin_unlock_irqrestore(&engine->timeline.lock, flags); | ||
838 | } | 834 | } |
839 | 835 | ||
840 | static struct i915_request * | 836 | static struct i915_request * |
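The tasklet hunk above hoists the irq-disabling lock out of guc_dequeue() into guc_submission_tasklet(), so completion processing and dequeue now run under one engine->timeline.lock critical section instead of two. A pthread-flavoured sketch of that refactor shape, with stub bodies standing in for the real port and queue handling:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t timeline_lock = PTHREAD_MUTEX_INITIALIZER;

/* caller must hold timeline_lock, like __guc_dequeue() in the patch */
static bool dequeue_locked(void)
{
	/* ... move requests from the priority tree onto the ports ... */
	return true;
}

static void submission_tasklet(void)
{
	pthread_mutex_lock(&timeline_lock); /* formerly taken inside dequeue */

	/* ... retire completed requests from the ports ... */

	if (dequeue_locked()) {
		/* ... ring the doorbell, as guc_submit() does ... */
	}

	pthread_mutex_unlock(&timeline_lock);
}

int main(void)
{
	submission_tasklet();
	return 0;
}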
@@ -877,72 +873,31 @@ guc_reset_prepare(struct intel_engine_cs *engine) | |||
877 | /* Check that a doorbell register is in the expected state */ | 873 | /* Check that a doorbell register is in the expected state */ |
878 | static bool doorbell_ok(struct intel_guc *guc, u16 db_id) | 874 | static bool doorbell_ok(struct intel_guc *guc, u16 db_id) |
879 | { | 875 | { |
880 | struct drm_i915_private *dev_priv = guc_to_i915(guc); | ||
881 | u32 drbregl; | ||
882 | bool valid; | 876 | bool valid; |
883 | 877 | ||
884 | GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID); | 878 | GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS); |
885 | 879 | ||
886 | drbregl = I915_READ(GEN8_DRBREGL(db_id)); | 880 | valid = __doorbell_valid(guc, db_id); |
887 | valid = drbregl & GEN8_DRB_VALID; | ||
888 | 881 | ||
889 | if (test_bit(db_id, guc->doorbell_bitmap) == valid) | 882 | if (test_bit(db_id, guc->doorbell_bitmap) == valid) |
890 | return true; | 883 | return true; |
891 | 884 | ||
892 | DRM_DEBUG_DRIVER("Doorbell %d has unexpected state (0x%x): valid=%s\n", | 885 | DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n", |
893 | db_id, drbregl, yesno(valid)); | 886 | db_id, yesno(valid)); |
894 | 887 | ||
895 | return false; | 888 | return false; |
896 | } | 889 | } |
897 | 890 | ||
898 | static bool guc_verify_doorbells(struct intel_guc *guc) | 891 | static bool guc_verify_doorbells(struct intel_guc *guc) |
899 | { | 892 | { |
893 | bool doorbells_ok = true; | ||
900 | u16 db_id; | 894 | u16 db_id; |
901 | 895 | ||
902 | for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) | 896 | for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) |
903 | if (!doorbell_ok(guc, db_id)) | 897 | if (!doorbell_ok(guc, db_id)) |
904 | return false; | 898 | doorbells_ok = false; |
905 | |||
906 | return true; | ||
907 | } | ||
908 | |||
909 | static int guc_clients_doorbell_init(struct intel_guc *guc) | ||
910 | { | ||
911 | int ret; | ||
912 | |||
913 | ret = create_doorbell(guc->execbuf_client); | ||
914 | if (ret) | ||
915 | return ret; | ||
916 | |||
917 | if (guc->preempt_client) { | ||
918 | ret = create_doorbell(guc->preempt_client); | ||
919 | if (ret) { | ||
920 | destroy_doorbell(guc->execbuf_client); | ||
921 | return ret; | ||
922 | } | ||
923 | } | ||
924 | |||
925 | return 0; | ||
926 | } | ||
927 | |||
928 | static void guc_clients_doorbell_fini(struct intel_guc *guc) | ||
929 | { | ||
930 | /* | ||
931 | * By the time we're here, GuC has already been reset. | ||
932 | * Instead of trying (in vain) to communicate with it, let's just | ||
933 | * cleanup the doorbell HW and our internal state. | ||
934 | */ | ||
935 | if (guc->preempt_client) { | ||
936 | __destroy_doorbell(guc->preempt_client); | ||
937 | __update_doorbell_desc(guc->preempt_client, | ||
938 | GUC_DOORBELL_INVALID); | ||
939 | } | ||
940 | 899 | ||
941 | if (guc->execbuf_client) { | 900 | return doorbells_ok; |
942 | __destroy_doorbell(guc->execbuf_client); | ||
943 | __update_doorbell_desc(guc->execbuf_client, | ||
944 | GUC_DOORBELL_INVALID); | ||
945 | } | ||
946 | } | 901 | } |
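guc_verify_doorbells() used to return on the first doorbell whose hardware state disagreed with the driver's bitmap; it now records the failure and keeps walking, so a single pass logs every inconsistent doorbell rather than just the first. The accumulate-and-continue shape, sketched standalone with placeholder names:

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_DOORBELLS 256

    /* Placeholder for the hardware/bitmap consistency check. */
    static bool doorbell_ok(unsigned int db_id)
    {
        return db_id != 42;
    }

    static bool verify_doorbells(void)
    {
        bool doorbells_ok = true;
        unsigned int db_id;

        for (db_id = 0; db_id < NUM_DOORBELLS; db_id++) {
            if (!doorbell_ok(db_id)) {
                fprintf(stderr, "doorbell %u has unexpected state\n", db_id);
                doorbells_ok = false;   /* keep going: report every bad one */
            }
        }
        return doorbells_ok;
    }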
947 | 902 | ||
948 | /** | 903 | /** |
@@ -1005,6 +960,10 @@ guc_client_alloc(struct drm_i915_private *dev_priv, | |||
1005 | } | 960 | } |
1006 | client->vaddr = vaddr; | 961 | client->vaddr = vaddr; |
1007 | 962 | ||
963 | ret = reserve_doorbell(client); | ||
964 | if (ret) | ||
965 | goto err_vaddr; | ||
966 | |||
1008 | client->doorbell_offset = __select_cacheline(guc); | 967 | client->doorbell_offset = __select_cacheline(guc); |
1009 | 968 | ||
1010 | /* | 969 | /* |
@@ -1017,13 +976,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv, | |||
1017 | else | 976 | else |
1018 | client->proc_desc_offset = (GUC_DB_SIZE / 2); | 977 | client->proc_desc_offset = (GUC_DB_SIZE / 2); |
1019 | 978 | ||
1020 | guc_proc_desc_init(guc, client); | ||
1021 | guc_stage_desc_init(guc, client); | ||
1022 | |||
1023 | ret = reserve_doorbell(client); | ||
1024 | if (ret) | ||
1025 | goto err_vaddr; | ||
1026 | |||
1027 | DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n", | 979 | DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n", |
1028 | priority, client, client->engines, client->stage_id); | 980 | priority, client, client->engines, client->stage_id); |
1029 | DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n", | 981 | DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n", |
@@ -1045,7 +997,6 @@ err_client: | |||
1045 | static void guc_client_free(struct intel_guc_client *client) | 997 | static void guc_client_free(struct intel_guc_client *client) |
1046 | { | 998 | { |
1047 | unreserve_doorbell(client); | 999 | unreserve_doorbell(client); |
1048 | guc_stage_desc_fini(client->guc, client); | ||
1049 | i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP); | 1000 | i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP); |
1050 | ida_simple_remove(&client->guc->stage_ids, client->stage_id); | 1001 | ida_simple_remove(&client->guc->stage_ids, client->stage_id); |
1051 | kfree(client); | 1002 | kfree(client); |
@@ -1112,6 +1063,69 @@ static void guc_clients_destroy(struct intel_guc *guc) | |||
1112 | guc_client_free(client); | 1063 | guc_client_free(client); |
1113 | } | 1064 | } |
1114 | 1065 | ||
1066 | static int __guc_client_enable(struct intel_guc_client *client) | ||
1067 | { | ||
1068 | int ret; | ||
1069 | |||
1070 | guc_proc_desc_init(client); | ||
1071 | guc_stage_desc_init(client); | ||
1072 | |||
1073 | ret = create_doorbell(client); | ||
1074 | if (ret) | ||
1075 | goto fail; | ||
1076 | |||
1077 | return 0; | ||
1078 | |||
1079 | fail: | ||
1080 | guc_stage_desc_fini(client); | ||
1081 | guc_proc_desc_fini(client); | ||
1082 | return ret; | ||
1083 | } | ||
1084 | |||
1085 | static void __guc_client_disable(struct intel_guc_client *client) | ||
1086 | { | ||
1087 | /* | ||
1088 | * By the time we're here, GuC may have already been reset. If that is | ||
1089 | * the case, instead of trying (in vain) to communicate with it, let's | ||
1090 | * just clean up the doorbell HW and our internal state. | ||
1091 | */ | ||
1092 | if (intel_guc_is_alive(client->guc)) | ||
1093 | destroy_doorbell(client); | ||
1094 | else | ||
1095 | __fini_doorbell(client); | ||
1096 | |||
1097 | guc_stage_desc_fini(client); | ||
1098 | guc_proc_desc_fini(client); | ||
1099 | } | ||
1100 | |||
1101 | static int guc_clients_enable(struct intel_guc *guc) | ||
1102 | { | ||
1103 | int ret; | ||
1104 | |||
1105 | ret = __guc_client_enable(guc->execbuf_client); | ||
1106 | if (ret) | ||
1107 | return ret; | ||
1108 | |||
1109 | if (guc->preempt_client) { | ||
1110 | ret = __guc_client_enable(guc->preempt_client); | ||
1111 | if (ret) { | ||
1112 | __guc_client_disable(guc->execbuf_client); | ||
1113 | return ret; | ||
1114 | } | ||
1115 | } | ||
1116 | |||
1117 | return 0; | ||
1118 | } | ||
1119 | |||
1120 | static void guc_clients_disable(struct intel_guc *guc) | ||
1121 | { | ||
1122 | if (guc->preempt_client) | ||
1123 | __guc_client_disable(guc->preempt_client); | ||
1124 | |||
1125 | if (guc->execbuf_client) | ||
1126 | __guc_client_disable(guc->execbuf_client); | ||
1127 | } | ||
1128 | |||
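The new __guc_client_enable()/__guc_client_disable() pair gathers the proc-descriptor setup, stage-descriptor setup and doorbell creation into one enable step, mirrored by a disable step, and guc_clients_enable() unwinds the execbuf client if the optional preempt client fails partway. A hedged sketch of that unwind-on-partial-failure shape, with invented names:

    #include <stdio.h>

    struct client {
        const char *name;
    };

    static int enable_one(struct client *c)
    {
        printf("enable %s\n", c->name);
        return 0;               /* 0 on success, negative errno on failure */
    }

    static void disable_one(struct client *c)
    {
        printf("disable %s\n", c->name);
    }

    /* Enable both clients; a late failure unwinds the earlier success. */
    static int clients_enable(struct client *execbuf, struct client *preempt)
    {
        int ret;

        ret = enable_one(execbuf);
        if (ret)
            return ret;

        if (preempt) {          /* the preempt client is optional */
            ret = enable_one(preempt);
            if (ret) {
                disable_one(execbuf);   /* roll back the partial setup */
                return ret;
            }
        }
        return 0;
    }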
1115 | /* | 1129 | /* |
1116 | * Set up the memory resources to be shared with the GuC (via the GGTT) | 1130 | * Set up the memory resources to be shared with the GuC (via the GGTT) |
1117 | * at firmware loading time. | 1131 | * at firmware loading time. |
@@ -1295,15 +1309,11 @@ int intel_guc_submission_enable(struct intel_guc *guc) | |||
1295 | 1309 | ||
1296 | GEM_BUG_ON(!guc->execbuf_client); | 1310 | GEM_BUG_ON(!guc->execbuf_client); |
1297 | 1311 | ||
1298 | guc_reset_wq(guc->execbuf_client); | ||
1299 | if (guc->preempt_client) | ||
1300 | guc_reset_wq(guc->preempt_client); | ||
1301 | |||
1302 | err = intel_guc_sample_forcewake(guc); | 1312 | err = intel_guc_sample_forcewake(guc); |
1303 | if (err) | 1313 | if (err) |
1304 | return err; | 1314 | return err; |
1305 | 1315 | ||
1306 | err = guc_clients_doorbell_init(guc); | 1316 | err = guc_clients_enable(guc); |
1307 | if (err) | 1317 | if (err) |
1308 | return err; | 1318 | return err; |
1309 | 1319 | ||
@@ -1325,7 +1335,7 @@ void intel_guc_submission_disable(struct intel_guc *guc) | |||
1325 | GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */ | 1335 | GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */ |
1326 | 1336 | ||
1327 | guc_interrupts_release(dev_priv); | 1337 | guc_interrupts_release(dev_priv); |
1328 | guc_clients_doorbell_fini(guc); | 1338 | guc_clients_disable(guc); |
1329 | } | 1339 | } |
1330 | 1340 | ||
1331 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) | 1341 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c index 26e48fc95543..1bf487f94254 100644 --- a/drivers/gpu/drm/i915/intel_hdcp.c +++ b/drivers/gpu/drm/i915/intel_hdcp.c | |||
@@ -16,6 +16,62 @@ | |||
16 | 16 | ||
17 | #define KEY_LOAD_TRIES 5 | 17 | #define KEY_LOAD_TRIES 5 |
18 | 18 | ||
19 | static | ||
20 | bool intel_hdcp_is_ksv_valid(u8 *ksv) | ||
21 | { | ||
22 | int i, ones = 0; | ||
23 | /* KSV has 20 1's and 20 0's */ | ||
24 | for (i = 0; i < DRM_HDCP_KSV_LEN; i++) | ||
25 | ones += hweight8(ksv[i]); | ||
26 | if (ones != 20) | ||
27 | return false; | ||
28 | |||
29 | return true; | ||
30 | } | ||
31 | |||
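A KSV is a 40-bit value that the HDCP spec requires to contain exactly twenty ones and twenty zeros, so the helper above sums hweight8() across the five bytes and accepts only a total of 20. A standalone equivalent, using the compiler popcount builtin as an hweight8() stand-in:

    #include <stdbool.h>
    #include <stdint.h>

    #define KSV_LEN 5           /* 40 bits */

    static bool ksv_valid(const uint8_t ksv[KSV_LEN])
    {
        int i, ones = 0;

        for (i = 0; i < KSV_LEN; i++)
            ones += __builtin_popcount(ksv[i]);  /* hweight8() stand-in */

        return ones == 20;      /* spec: exactly 20 ones, 20 zeros */
    }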
32 | static | ||
33 | int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port, | ||
34 | const struct intel_hdcp_shim *shim, u8 *bksv) | ||
35 | { | ||
36 | int ret, i, tries = 2; | ||
37 | |||
38 | /* HDCP spec states that we must retry the bksv if it is invalid */ | ||
39 | for (i = 0; i < tries; i++) { | ||
40 | ret = shim->read_bksv(intel_dig_port, bksv); | ||
41 | if (ret) | ||
42 | return ret; | ||
43 | if (intel_hdcp_is_ksv_valid(bksv)) | ||
44 | break; | ||
45 | } | ||
46 | if (i == tries) { | ||
47 | DRM_DEBUG_KMS("Bksv is invalid\n"); | ||
48 | return -ENODEV; | ||
49 | } | ||
50 | |||
51 | return 0; | ||
52 | } | ||
53 | |||
54 | /* Is HDCP1.4 capable on Platform and Sink */ | ||
55 | bool intel_hdcp_capable(struct intel_connector *connector) | ||
56 | { | ||
57 | struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); | ||
58 | const struct intel_hdcp_shim *shim = connector->hdcp.shim; | ||
59 | bool capable = false; | ||
60 | u8 bksv[5]; | ||
61 | |||
62 | if (!shim) | ||
63 | return capable; | ||
64 | |||
65 | if (shim->hdcp_capable) { | ||
66 | shim->hdcp_capable(intel_dig_port, &capable); | ||
67 | } else { | ||
68 | if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv)) | ||
69 | capable = true; | ||
70 | } | ||
71 | |||
72 | return capable; | ||
73 | } | ||
74 | |||
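intel_hdcp_capable() prefers a shim's dedicated hdcp_capable() hook and, for shims that lack one, falls back to reading the BKSV: a sink that produces a valid KSV is assumed HDCP1.4 capable. The optional-hook-with-fallback shape, sketched with hypothetical types:

    #include <stdbool.h>
    #include <stdint.h>

    struct sink;                /* opaque, hypothetical */

    struct shim {
        /* optional direct capability query */
        void (*hdcp_capable)(struct sink *s, bool *capable);
        /* mandatory 5-byte BKSV read; returns 0 on success */
        int (*read_bksv)(struct sink *s, uint8_t *bksv);
    };

    static bool ksv_valid(const uint8_t *ksv)
    {
        int i, ones = 0;

        for (i = 0; i < 5; i++)
            ones += __builtin_popcount(ksv[i]);
        return ones == 20;
    }

    static bool hdcp_capable(struct sink *s, const struct shim *shim)
    {
        bool capable = false;
        uint8_t bksv[5];

        if (!shim)
            return false;

        if (shim->hdcp_capable)
            shim->hdcp_capable(s, &capable);        /* ask directly */
        else if (!shim->read_bksv(s, bksv) && ksv_valid(bksv))
            capable = true;     /* a valid BKSV implies an HDCP sink */

        return capable;
    }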
19 | static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port, | 75 | static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port, |
20 | const struct intel_hdcp_shim *shim) | 76 | const struct intel_hdcp_shim *shim) |
21 | { | 77 | { |
@@ -168,18 +224,6 @@ u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port) | |||
168 | } | 224 | } |
169 | 225 | ||
170 | static | 226 | static |
171 | bool intel_hdcp_is_ksv_valid(u8 *ksv) | ||
172 | { | ||
173 | int i, ones = 0; | ||
174 | /* KSV has 20 1's and 20 0's */ | ||
175 | for (i = 0; i < DRM_HDCP_KSV_LEN; i++) | ||
176 | ones += hweight8(ksv[i]); | ||
177 | if (ones != 20) | ||
178 | return false; | ||
179 | return true; | ||
180 | } | ||
181 | |||
182 | static | ||
183 | int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, | 227 | int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, |
184 | const struct intel_hdcp_shim *shim, | 228 | const struct intel_hdcp_shim *shim, |
185 | u8 *ksv_fifo, u8 num_downstream, u8 *bstatus) | 229 | u8 *ksv_fifo, u8 num_downstream, u8 *bstatus) |
@@ -383,7 +427,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, | |||
383 | if (intel_wait_for_register(dev_priv, HDCP_REP_CTL, | 427 | if (intel_wait_for_register(dev_priv, HDCP_REP_CTL, |
384 | HDCP_SHA1_COMPLETE, | 428 | HDCP_SHA1_COMPLETE, |
385 | HDCP_SHA1_COMPLETE, 1)) { | 429 | HDCP_SHA1_COMPLETE, 1)) { |
386 | DRM_DEBUG_KMS("Timed out waiting for SHA1 complete\n"); | 430 | DRM_ERROR("Timed out waiting for SHA1 complete\n"); |
387 | return -ETIMEDOUT; | 431 | return -ETIMEDOUT; |
388 | } | 432 | } |
389 | if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { | 433 | if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) { |
@@ -404,7 +448,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port, | |||
404 | 448 | ||
405 | ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim); | 449 | ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim); |
406 | if (ret) { | 450 | if (ret) { |
407 | DRM_ERROR("KSV list failed to become ready (%d)\n", ret); | 451 | DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret); |
408 | return ret; | 452 | return ret; |
409 | } | 453 | } |
410 | 454 | ||
@@ -414,7 +458,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port, | |||
414 | 458 | ||
415 | if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || | 459 | if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || |
416 | DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { | 460 | DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { |
417 | DRM_ERROR("Max Topology Limit Exceeded\n"); | 461 | DRM_DEBUG_KMS("Max Topology Limit Exceeded\n"); |
418 | return -EPERM; | 462 | return -EPERM; |
419 | } | 463 | } |
420 | 464 | ||
@@ -450,7 +494,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port, | |||
450 | } | 494 | } |
451 | 495 | ||
452 | if (i == tries) { | 496 | if (i == tries) { |
453 | DRM_ERROR("V Prime validation failed.(%d)\n", ret); | 497 | DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret); |
454 | goto err; | 498 | goto err; |
455 | } | 499 | } |
456 | 500 | ||
@@ -499,7 +543,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, | |||
499 | if (ret) | 543 | if (ret) |
500 | return ret; | 544 | return ret; |
501 | if (!hdcp_capable) { | 545 | if (!hdcp_capable) { |
502 | DRM_ERROR("Panel is not HDCP capable\n"); | 546 | DRM_DEBUG_KMS("Panel is not HDCP capable\n"); |
503 | return -EINVAL; | 547 | return -EINVAL; |
504 | } | 548 | } |
505 | } | 549 | } |
@@ -527,18 +571,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, | |||
527 | 571 | ||
528 | memset(&bksv, 0, sizeof(bksv)); | 572 | memset(&bksv, 0, sizeof(bksv)); |
529 | 573 | ||
530 | /* HDCP spec states that we must retry the bksv if it is invalid */ | 574 | ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim); |
531 | for (i = 0; i < tries; i++) { | 575 | if (ret < 0) |
532 | ret = shim->read_bksv(intel_dig_port, bksv.shim); | 576 | return ret; |
533 | if (ret) | ||
534 | return ret; | ||
535 | if (intel_hdcp_is_ksv_valid(bksv.shim)) | ||
536 | break; | ||
537 | } | ||
538 | if (i == tries) { | ||
539 | DRM_ERROR("HDCP failed, Bksv is invalid\n"); | ||
540 | return -ENODEV; | ||
541 | } | ||
542 | 577 | ||
543 | I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]); | 578 | I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]); |
544 | I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]); | 579 | I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]); |
@@ -594,8 +629,8 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, | |||
594 | } | 629 | } |
595 | 630 | ||
596 | if (i == tries) { | 631 | if (i == tries) { |
597 | DRM_ERROR("Timed out waiting for Ri prime match (%x)\n", | 632 | DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n", |
598 | I915_READ(PORT_HDCP_STATUS(port))); | 633 | I915_READ(PORT_HDCP_STATUS(port))); |
599 | return -ETIMEDOUT; | 634 | return -ETIMEDOUT; |
600 | } | 635 | } |
601 | 636 | ||
@@ -618,14 +653,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, | |||
618 | return 0; | 653 | return 0; |
619 | } | 654 | } |
620 | 655 | ||
621 | static | ||
622 | struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector) | ||
623 | { | ||
624 | return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base); | ||
625 | } | ||
626 | |||
627 | static int _intel_hdcp_disable(struct intel_connector *connector) | 656 | static int _intel_hdcp_disable(struct intel_connector *connector) |
628 | { | 657 | { |
658 | struct intel_hdcp *hdcp = &connector->hdcp; | ||
629 | struct drm_i915_private *dev_priv = connector->base.dev->dev_private; | 659 | struct drm_i915_private *dev_priv = connector->base.dev->dev_private; |
630 | struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); | 660 | struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); |
631 | enum port port = intel_dig_port->base.port; | 661 | enum port port = intel_dig_port->base.port; |
@@ -641,7 +671,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector) | |||
641 | return -ETIMEDOUT; | 671 | return -ETIMEDOUT; |
642 | } | 672 | } |
643 | 673 | ||
644 | ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false); | 674 | ret = hdcp->shim->toggle_signalling(intel_dig_port, false); |
645 | if (ret) { | 675 | if (ret) { |
646 | DRM_ERROR("Failed to disable HDCP signalling\n"); | 676 | DRM_ERROR("Failed to disable HDCP signalling\n"); |
647 | return ret; | 677 | return ret; |
@@ -653,6 +683,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector) | |||
653 | 683 | ||
654 | static int _intel_hdcp_enable(struct intel_connector *connector) | 684 | static int _intel_hdcp_enable(struct intel_connector *connector) |
655 | { | 685 | { |
686 | struct intel_hdcp *hdcp = &connector->hdcp; | ||
656 | struct drm_i915_private *dev_priv = connector->base.dev->dev_private; | 687 | struct drm_i915_private *dev_priv = connector->base.dev->dev_private; |
657 | int i, ret, tries = 3; | 688 | int i, ret, tries = 3; |
658 | 689 | ||
@@ -677,8 +708,7 @@ static int _intel_hdcp_enable(struct intel_connector *connector) | |||
677 | 708 | ||
678 | /* In case of authentication failures, HDCP spec expects reauth. */ | 709 | /* In case of authentication failures, HDCP spec expects reauth. */ |
679 | for (i = 0; i < tries; i++) { | 710 | for (i = 0; i < tries; i++) { |
680 | ret = intel_hdcp_auth(conn_to_dig_port(connector), | 711 | ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim); |
681 | connector->hdcp_shim); | ||
682 | if (!ret) | 712 | if (!ret) |
683 | return 0; | 713 | return 0; |
684 | 714 | ||
@@ -688,42 +718,50 @@ static int _intel_hdcp_enable(struct intel_connector *connector) | |||
688 | _intel_hdcp_disable(connector); | 718 | _intel_hdcp_disable(connector); |
689 | } | 719 | } |
690 | 720 | ||
691 | DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret); | 721 | DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret); |
692 | return ret; | 722 | return ret; |
693 | } | 723 | } |
694 | 724 | ||
725 | static inline | ||
726 | struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp) | ||
727 | { | ||
728 | return container_of(hdcp, struct intel_connector, hdcp); | ||
729 | } | ||
730 | |||
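With the HDCP state split into its own struct intel_hdcp embedded in the connector, the work handlers below recover the connector via container_of() rather than keeping a back-pointer. A minimal standalone illustration of that recovery (the macro here is a simplified version of the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified version of the kernel macro. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct hdcp {
        int value;
    };

    struct connector {
        const char *name;
        struct hdcp hdcp;       /* embedded, like intel_connector.hdcp */
    };

    int main(void)
    {
        struct connector c = { .name = "HDMI-A-1" };
        struct hdcp *h = &c.hdcp;

        /* Recover the enclosing connector from the embedded member. */
        struct connector *back = container_of(h, struct connector, hdcp);

        printf("%s\n", back->name);     /* prints HDMI-A-1 */
        return 0;
    }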
695 | static void intel_hdcp_check_work(struct work_struct *work) | 731 | static void intel_hdcp_check_work(struct work_struct *work) |
696 | { | 732 | { |
697 | struct intel_connector *connector = container_of(to_delayed_work(work), | 733 | struct intel_hdcp *hdcp = container_of(to_delayed_work(work), |
698 | struct intel_connector, | 734 | struct intel_hdcp, |
699 | hdcp_check_work); | 735 | check_work); |
736 | struct intel_connector *connector = intel_hdcp_to_connector(hdcp); | ||
737 | |||
700 | if (!intel_hdcp_check_link(connector)) | 738 | if (!intel_hdcp_check_link(connector)) |
701 | schedule_delayed_work(&connector->hdcp_check_work, | 739 | schedule_delayed_work(&hdcp->check_work, |
702 | DRM_HDCP_CHECK_PERIOD_MS); | 740 | DRM_HDCP_CHECK_PERIOD_MS); |
703 | } | 741 | } |
704 | 742 | ||
705 | static void intel_hdcp_prop_work(struct work_struct *work) | 743 | static void intel_hdcp_prop_work(struct work_struct *work) |
706 | { | 744 | { |
707 | struct intel_connector *connector = container_of(work, | 745 | struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp, |
708 | struct intel_connector, | 746 | prop_work); |
709 | hdcp_prop_work); | 747 | struct intel_connector *connector = intel_hdcp_to_connector(hdcp); |
710 | struct drm_device *dev = connector->base.dev; | 748 | struct drm_device *dev = connector->base.dev; |
711 | struct drm_connector_state *state; | 749 | struct drm_connector_state *state; |
712 | 750 | ||
713 | drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); | 751 | drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); |
714 | mutex_lock(&connector->hdcp_mutex); | 752 | mutex_lock(&hdcp->mutex); |
715 | 753 | ||
716 | /* | 754 | /* |
717 | * This worker is only used to flip between ENABLED/DESIRED. Either of | 755 | * This worker is only used to flip between ENABLED/DESIRED. Either of |
718 | * those to UNDESIRED is handled by core. If hdcp_value == UNDESIRED, | 756 | * those to UNDESIRED is handled by core. If value == UNDESIRED, |
719 | * we're running just after hdcp has been disabled, so just exit | 757 | * we're running just after hdcp has been disabled, so just exit |
720 | */ | 758 | */ |
721 | if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { | 759 | if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { |
722 | state = connector->base.state; | 760 | state = connector->base.state; |
723 | state->content_protection = connector->hdcp_value; | 761 | state->content_protection = hdcp->value; |
724 | } | 762 | } |
725 | 763 | ||
726 | mutex_unlock(&connector->hdcp_mutex); | 764 | mutex_unlock(&hdcp->mutex); |
727 | drm_modeset_unlock(&dev->mode_config.connection_mutex); | 765 | drm_modeset_unlock(&dev->mode_config.connection_mutex); |
728 | } | 766 | } |
729 | 767 | ||
@@ -735,8 +773,9 @@ bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port) | |||
735 | } | 773 | } |
736 | 774 | ||
737 | int intel_hdcp_init(struct intel_connector *connector, | 775 | int intel_hdcp_init(struct intel_connector *connector, |
738 | const struct intel_hdcp_shim *hdcp_shim) | 776 | const struct intel_hdcp_shim *shim) |
739 | { | 777 | { |
778 | struct intel_hdcp *hdcp = &connector->hdcp; | ||
740 | int ret; | 779 | int ret; |
741 | 780 | ||
742 | ret = drm_connector_attach_content_protection_property( | 781 | ret = drm_connector_attach_content_protection_property( |
@@ -744,51 +783,53 @@ int intel_hdcp_init(struct intel_connector *connector, | |||
744 | if (ret) | 783 | if (ret) |
745 | return ret; | 784 | return ret; |
746 | 785 | ||
747 | connector->hdcp_shim = hdcp_shim; | 786 | hdcp->shim = shim; |
748 | mutex_init(&connector->hdcp_mutex); | 787 | mutex_init(&hdcp->mutex); |
749 | INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work); | 788 | INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work); |
750 | INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work); | 789 | INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work); |
751 | return 0; | 790 | return 0; |
752 | } | 791 | } |
753 | 792 | ||
754 | int intel_hdcp_enable(struct intel_connector *connector) | 793 | int intel_hdcp_enable(struct intel_connector *connector) |
755 | { | 794 | { |
795 | struct intel_hdcp *hdcp = &connector->hdcp; | ||
756 | int ret; | 796 | int ret; |
757 | 797 | ||
758 | if (!connector->hdcp_shim) | 798 | if (!hdcp->shim) |
759 | return -ENOENT; | 799 | return -ENOENT; |
760 | 800 | ||
761 | mutex_lock(&connector->hdcp_mutex); | 801 | mutex_lock(&hdcp->mutex); |
762 | 802 | ||
763 | ret = _intel_hdcp_enable(connector); | 803 | ret = _intel_hdcp_enable(connector); |
764 | if (ret) | 804 | if (ret) |
765 | goto out; | 805 | goto out; |
766 | 806 | ||
767 | connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED; | 807 | hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; |
768 | schedule_work(&connector->hdcp_prop_work); | 808 | schedule_work(&hdcp->prop_work); |
769 | schedule_delayed_work(&connector->hdcp_check_work, | 809 | schedule_delayed_work(&hdcp->check_work, |
770 | DRM_HDCP_CHECK_PERIOD_MS); | 810 | DRM_HDCP_CHECK_PERIOD_MS); |
771 | out: | 811 | out: |
772 | mutex_unlock(&connector->hdcp_mutex); | 812 | mutex_unlock(&hdcp->mutex); |
773 | return ret; | 813 | return ret; |
774 | } | 814 | } |
775 | 815 | ||
776 | int intel_hdcp_disable(struct intel_connector *connector) | 816 | int intel_hdcp_disable(struct intel_connector *connector) |
777 | { | 817 | { |
818 | struct intel_hdcp *hdcp = &connector->hdcp; | ||
778 | int ret = 0; | 819 | int ret = 0; |
779 | 820 | ||
780 | if (!connector->hdcp_shim) | 821 | if (!hdcp->shim) |
781 | return -ENOENT; | 822 | return -ENOENT; |
782 | 823 | ||
783 | mutex_lock(&connector->hdcp_mutex); | 824 | mutex_lock(&hdcp->mutex); |
784 | 825 | ||
785 | if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { | 826 | if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { |
786 | connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED; | 827 | hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED; |
787 | ret = _intel_hdcp_disable(connector); | 828 | ret = _intel_hdcp_disable(connector); |
788 | } | 829 | } |
789 | 830 | ||
790 | mutex_unlock(&connector->hdcp_mutex); | 831 | mutex_unlock(&hdcp->mutex); |
791 | cancel_delayed_work_sync(&connector->hdcp_check_work); | 832 | cancel_delayed_work_sync(&hdcp->check_work); |
792 | return ret; | 833 | return ret; |
793 | } | 834 | } |
794 | 835 | ||
@@ -828,17 +869,18 @@ void intel_hdcp_atomic_check(struct drm_connector *connector, | |||
828 | /* Implements Part 3 of the HDCP authorization procedure */ | 869 | /* Implements Part 3 of the HDCP authorization procedure */ |
829 | int intel_hdcp_check_link(struct intel_connector *connector) | 870 | int intel_hdcp_check_link(struct intel_connector *connector) |
830 | { | 871 | { |
872 | struct intel_hdcp *hdcp = &connector->hdcp; | ||
831 | struct drm_i915_private *dev_priv = connector->base.dev->dev_private; | 873 | struct drm_i915_private *dev_priv = connector->base.dev->dev_private; |
832 | struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); | 874 | struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); |
833 | enum port port = intel_dig_port->base.port; | 875 | enum port port = intel_dig_port->base.port; |
834 | int ret = 0; | 876 | int ret = 0; |
835 | 877 | ||
836 | if (!connector->hdcp_shim) | 878 | if (!hdcp->shim) |
837 | return -ENOENT; | 879 | return -ENOENT; |
838 | 880 | ||
839 | mutex_lock(&connector->hdcp_mutex); | 881 | mutex_lock(&hdcp->mutex); |
840 | 882 | ||
841 | if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) | 883 | if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) |
842 | goto out; | 884 | goto out; |
843 | 885 | ||
844 | if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) { | 886 | if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) { |
@@ -846,17 +888,15 @@ int intel_hdcp_check_link(struct intel_connector *connector) | |||
846 | connector->base.name, connector->base.base.id, | 888 | connector->base.name, connector->base.base.id, |
847 | I915_READ(PORT_HDCP_STATUS(port))); | 889 | I915_READ(PORT_HDCP_STATUS(port))); |
848 | ret = -ENXIO; | 890 | ret = -ENXIO; |
849 | connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED; | 891 | hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; |
850 | schedule_work(&connector->hdcp_prop_work); | 892 | schedule_work(&hdcp->prop_work); |
851 | goto out; | 893 | goto out; |
852 | } | 894 | } |
853 | 895 | ||
854 | if (connector->hdcp_shim->check_link(intel_dig_port)) { | 896 | if (hdcp->shim->check_link(intel_dig_port)) { |
855 | if (connector->hdcp_value != | 897 | if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { |
856 | DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { | 898 | hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; |
857 | connector->hdcp_value = | 899 | schedule_work(&hdcp->prop_work); |
858 | DRM_MODE_CONTENT_PROTECTION_ENABLED; | ||
859 | schedule_work(&connector->hdcp_prop_work); | ||
860 | } | 900 | } |
861 | goto out; | 901 | goto out; |
862 | } | 902 | } |
@@ -867,20 +907,20 @@ int intel_hdcp_check_link(struct intel_connector *connector) | |||
867 | ret = _intel_hdcp_disable(connector); | 907 | ret = _intel_hdcp_disable(connector); |
868 | if (ret) { | 908 | if (ret) { |
869 | DRM_ERROR("Failed to disable hdcp (%d)\n", ret); | 909 | DRM_ERROR("Failed to disable hdcp (%d)\n", ret); |
870 | connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED; | 910 | hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; |
871 | schedule_work(&connector->hdcp_prop_work); | 911 | schedule_work(&hdcp->prop_work); |
872 | goto out; | 912 | goto out; |
873 | } | 913 | } |
874 | 914 | ||
875 | ret = _intel_hdcp_enable(connector); | 915 | ret = _intel_hdcp_enable(connector); |
876 | if (ret) { | 916 | if (ret) { |
877 | DRM_ERROR("Failed to enable hdcp (%d)\n", ret); | 917 | DRM_DEBUG_KMS("Failed to enable hdcp (%d)\n", ret); |
878 | connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED; | 918 | hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; |
879 | schedule_work(&connector->hdcp_prop_work); | 919 | schedule_work(&hdcp->prop_work); |
880 | goto out; | 920 | goto out; |
881 | } | 921 | } |
882 | 922 | ||
883 | out: | 923 | out: |
884 | mutex_unlock(&connector->hdcp_mutex); | 924 | mutex_unlock(&hdcp->mutex); |
885 | return ret; | 925 | return ret; |
886 | } | 926 | } |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index d7234e03fdb0..e2c6a2b3e8f2 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -148,14 +148,13 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv, | |||
148 | } | 148 | } |
149 | } | 149 | } |
150 | 150 | ||
151 | static void g4x_write_infoframe(struct drm_encoder *encoder, | 151 | static void g4x_write_infoframe(struct intel_encoder *encoder, |
152 | const struct intel_crtc_state *crtc_state, | 152 | const struct intel_crtc_state *crtc_state, |
153 | unsigned int type, | 153 | unsigned int type, |
154 | const void *frame, ssize_t len) | 154 | const void *frame, ssize_t len) |
155 | { | 155 | { |
156 | const u32 *data = frame; | 156 | const u32 *data = frame; |
157 | struct drm_device *dev = encoder->dev; | 157 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
158 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
159 | u32 val = I915_READ(VIDEO_DIP_CTL); | 158 | u32 val = I915_READ(VIDEO_DIP_CTL); |
160 | int i; | 159 | int i; |
161 | 160 | ||
@@ -186,31 +185,29 @@ static void g4x_write_infoframe(struct drm_encoder *encoder, | |||
186 | POSTING_READ(VIDEO_DIP_CTL); | 185 | POSTING_READ(VIDEO_DIP_CTL); |
187 | } | 186 | } |
188 | 187 | ||
189 | static bool g4x_infoframe_enabled(struct drm_encoder *encoder, | 188 | static bool g4x_infoframe_enabled(struct intel_encoder *encoder, |
190 | const struct intel_crtc_state *pipe_config) | 189 | const struct intel_crtc_state *pipe_config) |
191 | { | 190 | { |
192 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); | 191 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
193 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); | ||
194 | u32 val = I915_READ(VIDEO_DIP_CTL); | 192 | u32 val = I915_READ(VIDEO_DIP_CTL); |
195 | 193 | ||
196 | if ((val & VIDEO_DIP_ENABLE) == 0) | 194 | if ((val & VIDEO_DIP_ENABLE) == 0) |
197 | return false; | 195 | return false; |
198 | 196 | ||
199 | if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port)) | 197 | if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port)) |
200 | return false; | 198 | return false; |
201 | 199 | ||
202 | return val & (VIDEO_DIP_ENABLE_AVI | | 200 | return val & (VIDEO_DIP_ENABLE_AVI | |
203 | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD); | 201 | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD); |
204 | } | 202 | } |
205 | 203 | ||
206 | static void ibx_write_infoframe(struct drm_encoder *encoder, | 204 | static void ibx_write_infoframe(struct intel_encoder *encoder, |
207 | const struct intel_crtc_state *crtc_state, | 205 | const struct intel_crtc_state *crtc_state, |
208 | unsigned int type, | 206 | unsigned int type, |
209 | const void *frame, ssize_t len) | 207 | const void *frame, ssize_t len) |
210 | { | 208 | { |
211 | const u32 *data = frame; | 209 | const u32 *data = frame; |
212 | struct drm_device *dev = encoder->dev; | 210 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
213 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
214 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); | 211 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); |
215 | i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); | 212 | i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); |
216 | u32 val = I915_READ(reg); | 213 | u32 val = I915_READ(reg); |
@@ -243,11 +240,10 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, | |||
243 | POSTING_READ(reg); | 240 | POSTING_READ(reg); |
244 | } | 241 | } |
245 | 242 | ||
246 | static bool ibx_infoframe_enabled(struct drm_encoder *encoder, | 243 | static bool ibx_infoframe_enabled(struct intel_encoder *encoder, |
247 | const struct intel_crtc_state *pipe_config) | 244 | const struct intel_crtc_state *pipe_config) |
248 | { | 245 | { |
249 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); | 246 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
250 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); | ||
251 | enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; | 247 | enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; |
252 | i915_reg_t reg = TVIDEO_DIP_CTL(pipe); | 248 | i915_reg_t reg = TVIDEO_DIP_CTL(pipe); |
253 | u32 val = I915_READ(reg); | 249 | u32 val = I915_READ(reg); |
@@ -255,7 +251,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder, | |||
255 | if ((val & VIDEO_DIP_ENABLE) == 0) | 251 | if ((val & VIDEO_DIP_ENABLE) == 0) |
256 | return false; | 252 | return false; |
257 | 253 | ||
258 | if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port)) | 254 | if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port)) |
259 | return false; | 255 | return false; |
260 | 256 | ||
261 | return val & (VIDEO_DIP_ENABLE_AVI | | 257 | return val & (VIDEO_DIP_ENABLE_AVI | |
@@ -263,14 +259,13 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder, | |||
263 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); | 259 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); |
264 | } | 260 | } |
265 | 261 | ||
266 | static void cpt_write_infoframe(struct drm_encoder *encoder, | 262 | static void cpt_write_infoframe(struct intel_encoder *encoder, |
267 | const struct intel_crtc_state *crtc_state, | 263 | const struct intel_crtc_state *crtc_state, |
268 | unsigned int type, | 264 | unsigned int type, |
269 | const void *frame, ssize_t len) | 265 | const void *frame, ssize_t len) |
270 | { | 266 | { |
271 | const u32 *data = frame; | 267 | const u32 *data = frame; |
272 | struct drm_device *dev = encoder->dev; | 268 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
273 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
274 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); | 269 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); |
275 | i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); | 270 | i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); |
276 | u32 val = I915_READ(reg); | 271 | u32 val = I915_READ(reg); |
@@ -306,10 +301,10 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, | |||
306 | POSTING_READ(reg); | 301 | POSTING_READ(reg); |
307 | } | 302 | } |
308 | 303 | ||
309 | static bool cpt_infoframe_enabled(struct drm_encoder *encoder, | 304 | static bool cpt_infoframe_enabled(struct intel_encoder *encoder, |
310 | const struct intel_crtc_state *pipe_config) | 305 | const struct intel_crtc_state *pipe_config) |
311 | { | 306 | { |
312 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); | 307 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
313 | enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; | 308 | enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; |
314 | u32 val = I915_READ(TVIDEO_DIP_CTL(pipe)); | 309 | u32 val = I915_READ(TVIDEO_DIP_CTL(pipe)); |
315 | 310 | ||
@@ -321,14 +316,13 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder, | |||
321 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); | 316 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); |
322 | } | 317 | } |
323 | 318 | ||
324 | static void vlv_write_infoframe(struct drm_encoder *encoder, | 319 | static void vlv_write_infoframe(struct intel_encoder *encoder, |
325 | const struct intel_crtc_state *crtc_state, | 320 | const struct intel_crtc_state *crtc_state, |
326 | unsigned int type, | 321 | unsigned int type, |
327 | const void *frame, ssize_t len) | 322 | const void *frame, ssize_t len) |
328 | { | 323 | { |
329 | const u32 *data = frame; | 324 | const u32 *data = frame; |
330 | struct drm_device *dev = encoder->dev; | 325 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
331 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
332 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); | 326 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); |
333 | i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); | 327 | i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); |
334 | u32 val = I915_READ(reg); | 328 | u32 val = I915_READ(reg); |
@@ -361,18 +355,17 @@ static void vlv_write_infoframe(struct drm_encoder *encoder, | |||
361 | POSTING_READ(reg); | 355 | POSTING_READ(reg); |
362 | } | 356 | } |
363 | 357 | ||
364 | static bool vlv_infoframe_enabled(struct drm_encoder *encoder, | 358 | static bool vlv_infoframe_enabled(struct intel_encoder *encoder, |
365 | const struct intel_crtc_state *pipe_config) | 359 | const struct intel_crtc_state *pipe_config) |
366 | { | 360 | { |
367 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); | 361 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
368 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); | ||
369 | enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; | 362 | enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe; |
370 | u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe)); | 363 | u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe)); |
371 | 364 | ||
372 | if ((val & VIDEO_DIP_ENABLE) == 0) | 365 | if ((val & VIDEO_DIP_ENABLE) == 0) |
373 | return false; | 366 | return false; |
374 | 367 | ||
375 | if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port)) | 368 | if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port)) |
376 | return false; | 369 | return false; |
377 | 370 | ||
378 | return val & (VIDEO_DIP_ENABLE_AVI | | 371 | return val & (VIDEO_DIP_ENABLE_AVI | |
@@ -380,14 +373,13 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder, | |||
380 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); | 373 | VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); |
381 | } | 374 | } |
382 | 375 | ||
383 | static void hsw_write_infoframe(struct drm_encoder *encoder, | 376 | static void hsw_write_infoframe(struct intel_encoder *encoder, |
384 | const struct intel_crtc_state *crtc_state, | 377 | const struct intel_crtc_state *crtc_state, |
385 | unsigned int type, | 378 | unsigned int type, |
386 | const void *frame, ssize_t len) | 379 | const void *frame, ssize_t len) |
387 | { | 380 | { |
388 | const u32 *data = frame; | 381 | const u32 *data = frame; |
389 | struct drm_device *dev = encoder->dev; | 382 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
390 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
391 | enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; | 383 | enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; |
392 | i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); | 384 | i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); |
393 | int data_size = type == DP_SDP_VSC ? | 385 | int data_size = type == DP_SDP_VSC ? |
@@ -415,10 +407,10 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, | |||
415 | POSTING_READ(ctl_reg); | 407 | POSTING_READ(ctl_reg); |
416 | } | 408 | } |
417 | 409 | ||
418 | static bool hsw_infoframe_enabled(struct drm_encoder *encoder, | 410 | static bool hsw_infoframe_enabled(struct intel_encoder *encoder, |
419 | const struct intel_crtc_state *pipe_config) | 411 | const struct intel_crtc_state *pipe_config) |
420 | { | 412 | { |
421 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); | 413 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
422 | u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder)); | 414 | u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder)); |
423 | 415 | ||
424 | return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | | 416 | return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW | |
@@ -443,11 +435,11 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder, | |||
443 | * trick them by giving an offset into the buffer and moving back the header | 435 | * trick them by giving an offset into the buffer and moving back the header |
444 | * bytes by one. | 436 | * bytes by one. |
445 | */ | 437 | */ |
446 | static void intel_write_infoframe(struct drm_encoder *encoder, | 438 | static void intel_write_infoframe(struct intel_encoder *encoder, |
447 | const struct intel_crtc_state *crtc_state, | 439 | const struct intel_crtc_state *crtc_state, |
448 | union hdmi_infoframe *frame) | 440 | union hdmi_infoframe *frame) |
449 | { | 441 | { |
450 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); | 442 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); |
451 | u8 buffer[VIDEO_DIP_DATA_SIZE]; | 443 | u8 buffer[VIDEO_DIP_DATA_SIZE]; |
452 | ssize_t len; | 444 | ssize_t len; |
453 | 445 | ||
@@ -457,20 +449,20 @@ static void intel_write_infoframe(struct drm_encoder *encoder, | |||
457 | return; | 449 | return; |
458 | 450 | ||
459 | /* Insert the 'hole' (see big comment above) at position 3 */ | 451 | /* Insert the 'hole' (see big comment above) at position 3 */ |
460 | buffer[0] = buffer[1]; | 452 | memmove(&buffer[0], &buffer[1], 3); |
461 | buffer[1] = buffer[2]; | ||
462 | buffer[2] = buffer[3]; | ||
463 | buffer[3] = 0; | 453 | buffer[3] = 0; |
464 | len++; | 454 | len++; |
465 | 455 | ||
466 | intel_dig_port->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len); | 456 | intel_dig_port->write_infoframe(encoder, |
457 | crtc_state, | ||
458 | frame->any.type, buffer, len); | ||
467 | } | 459 | } |
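The three explicit byte copies that opened the 'hole' at position 3 collapse into one memmove(); memmove() is the right call because source and destination overlap by design, which plain memcpy() would leave undefined. A standalone check of the equivalence:

    #include <assert.h>
    #include <string.h>

    int main(void)
    {
        unsigned char buffer[8] = { 0xA0, 0xB0, 0xC0, 0xD0, 1, 2, 3, 4 };

        /* Shift bytes 1..3 down by one, then zero the freed slot; the
         * regions overlap, so memmove() (not memcpy()) is required. */
        memmove(&buffer[0], &buffer[1], 3);
        buffer[3] = 0;

        assert(buffer[0] == 0xB0 && buffer[1] == 0xC0 &&
               buffer[2] == 0xD0 && buffer[3] == 0);
        return 0;
    }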
468 | 460 | ||
469 | static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, | 461 | static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder, |
470 | const struct intel_crtc_state *crtc_state, | 462 | const struct intel_crtc_state *crtc_state, |
471 | const struct drm_connector_state *conn_state) | 463 | const struct drm_connector_state *conn_state) |
472 | { | 464 | { |
473 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 465 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); |
474 | const struct drm_display_mode *adjusted_mode = | 466 | const struct drm_display_mode *adjusted_mode = |
475 | &crtc_state->base.adjusted_mode; | 467 | &crtc_state->base.adjusted_mode; |
476 | struct drm_connector *connector = &intel_hdmi->attached_connector->base; | 468 | struct drm_connector *connector = &intel_hdmi->attached_connector->base; |
@@ -487,8 +479,10 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, | |||
487 | return; | 479 | return; |
488 | } | 480 | } |
489 | 481 | ||
490 | if (crtc_state->ycbcr420) | 482 | if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) |
491 | frame.avi.colorspace = HDMI_COLORSPACE_YUV420; | 483 | frame.avi.colorspace = HDMI_COLORSPACE_YUV420; |
484 | else if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) | ||
485 | frame.avi.colorspace = HDMI_COLORSPACE_YUV444; | ||
492 | else | 486 | else |
493 | frame.avi.colorspace = HDMI_COLORSPACE_RGB; | 487 | frame.avi.colorspace = HDMI_COLORSPACE_RGB; |
494 | 488 | ||
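Replacing the single ycbcr420 flag with an output_format enum lets the AVI infoframe choose among RGB, YCbCr 4:4:4 and YCbCr 4:2:0 from one field. The mapping as a small sketch; the enum names here are illustrative, not the driver's:

    enum output_format { FMT_RGB, FMT_YCBCR420, FMT_YCBCR444 };
    enum avi_colorspace { CS_RGB, CS_YUV420, CS_YUV444 };

    static enum avi_colorspace pick_colorspace(enum output_format fmt)
    {
        switch (fmt) {
        case FMT_YCBCR420:
            return CS_YUV420;
        case FMT_YCBCR444:
            return CS_YUV444;
        default:
            return CS_RGB;      /* RGB stays the fallback */
        }
    }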
@@ -503,10 +497,11 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, | |||
503 | conn_state); | 497 | conn_state); |
504 | 498 | ||
505 | /* TODO: handle pixel repetition for YCBCR420 outputs */ | 499 | /* TODO: handle pixel repetition for YCBCR420 outputs */ |
506 | intel_write_infoframe(encoder, crtc_state, &frame); | 500 | intel_write_infoframe(encoder, crtc_state, |
501 | &frame); | ||
507 | } | 502 | } |
508 | 503 | ||
509 | static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder, | 504 | static void intel_hdmi_set_spd_infoframe(struct intel_encoder *encoder, |
510 | const struct intel_crtc_state *crtc_state) | 505 | const struct intel_crtc_state *crtc_state) |
511 | { | 506 | { |
512 | union hdmi_infoframe frame; | 507 | union hdmi_infoframe frame; |
@@ -520,11 +515,12 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder, | |||
520 | 515 | ||
521 | frame.spd.sdi = HDMI_SPD_SDI_PC; | 516 | frame.spd.sdi = HDMI_SPD_SDI_PC; |
522 | 517 | ||
523 | intel_write_infoframe(encoder, crtc_state, &frame); | 518 | intel_write_infoframe(encoder, crtc_state, |
519 | &frame); | ||
524 | } | 520 | } |
525 | 521 | ||
526 | static void | 522 | static void |
527 | intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder, | 523 | intel_hdmi_set_hdmi_infoframe(struct intel_encoder *encoder, |
528 | const struct intel_crtc_state *crtc_state, | 524 | const struct intel_crtc_state *crtc_state, |
529 | const struct drm_connector_state *conn_state) | 525 | const struct drm_connector_state *conn_state) |
530 | { | 526 | { |
@@ -537,20 +533,21 @@ intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder, | |||
537 | if (ret < 0) | 533 | if (ret < 0) |
538 | return; | 534 | return; |
539 | 535 | ||
540 | intel_write_infoframe(encoder, crtc_state, &frame); | 536 | intel_write_infoframe(encoder, crtc_state, |
537 | &frame); | ||
541 | } | 538 | } |
542 | 539 | ||
543 | static void g4x_set_infoframes(struct drm_encoder *encoder, | 540 | static void g4x_set_infoframes(struct intel_encoder *encoder, |
544 | bool enable, | 541 | bool enable, |
545 | const struct intel_crtc_state *crtc_state, | 542 | const struct intel_crtc_state *crtc_state, |
546 | const struct drm_connector_state *conn_state) | 543 | const struct drm_connector_state *conn_state) |
547 | { | 544 | { |
548 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); | 545 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
549 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); | 546 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); |
550 | struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; | 547 | struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; |
551 | i915_reg_t reg = VIDEO_DIP_CTL; | 548 | i915_reg_t reg = VIDEO_DIP_CTL; |
552 | u32 val = I915_READ(reg); | 549 | u32 val = I915_READ(reg); |
553 | u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port); | 550 | u32 port = VIDEO_DIP_PORT(encoder->port); |
554 | 551 | ||
555 | assert_hdmi_port_disabled(intel_hdmi); | 552 | assert_hdmi_port_disabled(intel_hdmi); |
556 | 553 | ||
@@ -658,11 +655,11 @@ static bool gcp_default_phase_possible(int pipe_bpp, | |||
658 | mode->crtc_htotal/2 % pixels_per_group == 0); | 655 | mode->crtc_htotal/2 % pixels_per_group == 0); |
659 | } | 656 | } |
660 | 657 | ||
661 | static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder, | 658 | static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder, |
662 | const struct intel_crtc_state *crtc_state, | 659 | const struct intel_crtc_state *crtc_state, |
663 | const struct drm_connector_state *conn_state) | 660 | const struct drm_connector_state *conn_state) |
664 | { | 661 | { |
665 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); | 662 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
666 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); | 663 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); |
667 | i915_reg_t reg; | 664 | i915_reg_t reg; |
668 | u32 val = 0; | 665 | u32 val = 0; |
@@ -690,18 +687,18 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder, | |||
690 | return val != 0; | 687 | return val != 0; |
691 | } | 688 | } |
692 | 689 | ||
693 | static void ibx_set_infoframes(struct drm_encoder *encoder, | 690 | static void ibx_set_infoframes(struct intel_encoder *encoder, |
694 | bool enable, | 691 | bool enable, |
695 | const struct intel_crtc_state *crtc_state, | 692 | const struct intel_crtc_state *crtc_state, |
696 | const struct drm_connector_state *conn_state) | 693 | const struct drm_connector_state *conn_state) |
697 | { | 694 | { |
698 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); | 695 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
699 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); | 696 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); |
700 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); | 697 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); |
701 | struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; | 698 | struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; |
702 | i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); | 699 | i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); |
703 | u32 val = I915_READ(reg); | 700 | u32 val = I915_READ(reg); |
704 | u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port); | 701 | u32 port = VIDEO_DIP_PORT(encoder->port); |
705 | 702 | ||
706 | assert_hdmi_port_disabled(intel_hdmi); | 703 | assert_hdmi_port_disabled(intel_hdmi); |
707 | 704 | ||
@@ -743,14 +740,14 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, | |||
743 | intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); | 740 | intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); |
744 | } | 741 | } |
745 | 742 | ||
746 | static void cpt_set_infoframes(struct drm_encoder *encoder, | 743 | static void cpt_set_infoframes(struct intel_encoder *encoder, |
747 | bool enable, | 744 | bool enable, |
748 | const struct intel_crtc_state *crtc_state, | 745 | const struct intel_crtc_state *crtc_state, |
749 | const struct drm_connector_state *conn_state) | 746 | const struct drm_connector_state *conn_state) |
750 | { | 747 | { |
751 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); | 748 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
752 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); | 749 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); |
753 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 750 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); |
754 | i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); | 751 | i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); |
755 | u32 val = I915_READ(reg); | 752 | u32 val = I915_READ(reg); |
756 | 753 | ||
@@ -786,18 +783,17 @@ static void cpt_set_infoframes(struct drm_encoder *encoder, | |||
786 | intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); | 783 | intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); |
787 | } | 784 | } |
788 | 785 | ||
789 | static void vlv_set_infoframes(struct drm_encoder *encoder, | 786 | static void vlv_set_infoframes(struct intel_encoder *encoder, |
790 | bool enable, | 787 | bool enable, |
791 | const struct intel_crtc_state *crtc_state, | 788 | const struct intel_crtc_state *crtc_state, |
792 | const struct drm_connector_state *conn_state) | 789 | const struct drm_connector_state *conn_state) |
793 | { | 790 | { |
794 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); | 791 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
795 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); | ||
796 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); | 792 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); |
797 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 793 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); |
798 | i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); | 794 | i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); |
799 | u32 val = I915_READ(reg); | 795 | u32 val = I915_READ(reg); |
800 | u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port); | 796 | u32 port = VIDEO_DIP_PORT(encoder->port); |
801 | 797 | ||
802 | assert_hdmi_port_disabled(intel_hdmi); | 798 | assert_hdmi_port_disabled(intel_hdmi); |
803 | 799 | ||
@@ -839,12 +835,12 @@ static void vlv_set_infoframes(struct drm_encoder *encoder, | |||
839 | intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); | 835 | intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); |
840 | } | 836 | } |
841 | 837 | ||
842 | static void hsw_set_infoframes(struct drm_encoder *encoder, | 838 | static void hsw_set_infoframes(struct intel_encoder *encoder, |
843 | bool enable, | 839 | bool enable, |
844 | const struct intel_crtc_state *crtc_state, | 840 | const struct intel_crtc_state *crtc_state, |
845 | const struct drm_connector_state *conn_state) | 841 | const struct drm_connector_state *conn_state) |
846 | { | 842 | { |
847 | struct drm_i915_private *dev_priv = to_i915(encoder->dev); | 843 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
848 | i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); | 844 | i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); |
849 | u32 val = I915_READ(reg); | 845 | u32 val = I915_READ(reg); |
850 | 846 | ||
@@ -966,13 +962,13 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, | |||
966 | ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an, | 962 | ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an, |
967 | DRM_HDCP_AN_LEN); | 963 | DRM_HDCP_AN_LEN); |
968 | if (ret) { | 964 | if (ret) { |
969 | DRM_ERROR("Write An over DDC failed (%d)\n", ret); | 965 | DRM_DEBUG_KMS("Write An over DDC failed (%d)\n", ret); |
970 | return ret; | 966 | return ret; |
971 | } | 967 | } |
972 | 968 | ||
973 | ret = intel_gmbus_output_aksv(adapter); | 969 | ret = intel_gmbus_output_aksv(adapter); |
974 | if (ret < 0) { | 970 | if (ret < 0) { |
975 | DRM_ERROR("Failed to output aksv (%d)\n", ret); | 971 | DRM_DEBUG_KMS("Failed to output aksv (%d)\n", ret); |
976 | return ret; | 972 | return ret; |
977 | } | 973 | } |
978 | return 0; | 974 | return 0; |
@@ -985,7 +981,7 @@ static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, | |||
985 | ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv, | 981 | ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv, |
986 | DRM_HDCP_KSV_LEN); | 982 | DRM_HDCP_KSV_LEN); |
987 | if (ret) | 983 | if (ret) |
988 | DRM_ERROR("Read Bksv over DDC failed (%d)\n", ret); | 984 | DRM_DEBUG_KMS("Read Bksv over DDC failed (%d)\n", ret); |
989 | return ret; | 985 | return ret; |
990 | } | 986 | } |
991 | 987 | ||
@@ -997,7 +993,7 @@ int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, | |||
997 | ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS, | 993 | ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS, |
998 | bstatus, DRM_HDCP_BSTATUS_LEN); | 994 | bstatus, DRM_HDCP_BSTATUS_LEN); |
999 | if (ret) | 995 | if (ret) |
1000 | DRM_ERROR("Read bstatus over DDC failed (%d)\n", ret); | 996 | DRM_DEBUG_KMS("Read bstatus over DDC failed (%d)\n", ret); |
1001 | return ret; | 997 | return ret; |
1002 | } | 998 | } |
1003 | 999 | ||
@@ -1010,7 +1006,7 @@ int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port, | |||
1010 | 1006 | ||
1011 | ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); | 1007 | ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); |
1012 | if (ret) { | 1008 | if (ret) { |
1013 | DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret); | 1009 | DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret); |
1014 | return ret; | 1010 | return ret; |
1015 | } | 1011 | } |
1016 | *repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT; | 1012 | *repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT; |
@@ -1025,7 +1021,7 @@ int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, | |||
1025 | ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME, | 1021 | ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME, |
1026 | ri_prime, DRM_HDCP_RI_LEN); | 1022 | ri_prime, DRM_HDCP_RI_LEN); |
1027 | if (ret) | 1023 | if (ret) |
1028 | DRM_ERROR("Read Ri' over DDC failed (%d)\n", ret); | 1024 | DRM_DEBUG_KMS("Read Ri' over DDC failed (%d)\n", ret); |
1029 | return ret; | 1025 | return ret; |
1030 | } | 1026 | } |
1031 | 1027 | ||
@@ -1038,7 +1034,7 @@ int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, | |||
1038 | 1034 | ||
1039 | ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); | 1035 | ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); |
1040 | if (ret) { | 1036 | if (ret) { |
1041 | DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret); | 1037 | DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret); |
1042 | return ret; | 1038 | return ret; |
1043 | } | 1039 | } |
1044 | *ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY; | 1040 | *ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY; |
@@ -1053,7 +1049,7 @@ int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, | |||
1053 | ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO, | 1049 | ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO, |
1054 | ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN); | 1050 | ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN); |
1055 | if (ret) { | 1051 | if (ret) { |
1056 | DRM_ERROR("Read ksv fifo over DDC failed (%d)\n", ret); | 1052 | DRM_DEBUG_KMS("Read ksv fifo over DDC failed (%d)\n", ret); |
1057 | return ret; | 1053 | return ret; |
1058 | } | 1054 | } |
1059 | return 0; | 1055 | return 0; |
@@ -1071,7 +1067,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, | |||
1071 | ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i), | 1067 | ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i), |
1072 | part, DRM_HDCP_V_PRIME_PART_LEN); | 1068 | part, DRM_HDCP_V_PRIME_PART_LEN); |
1073 | if (ret) | 1069 | if (ret) |
1074 | DRM_ERROR("Read V'[%d] over DDC failed (%d)\n", i, ret); | 1070 | DRM_DEBUG_KMS("Read V'[%d] over DDC failed (%d)\n", i, ret); |
1075 | return ret; | 1071 | return ret; |
1076 | } | 1072 | } |
1077 | 1073 | ||
@@ -1218,7 +1214,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, | |||
1218 | if (tmp & HDMI_MODE_SELECT_HDMI) | 1214 | if (tmp & HDMI_MODE_SELECT_HDMI) |
1219 | pipe_config->has_hdmi_sink = true; | 1215 | pipe_config->has_hdmi_sink = true; |
1220 | 1216 | ||
1221 | if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config)) | 1217 | if (intel_dig_port->infoframe_enabled(encoder, pipe_config)) |
1222 | pipe_config->has_infoframe = true; | 1218 | pipe_config->has_infoframe = true; |
1223 | 1219 | ||
1224 | if (tmp & SDVO_AUDIO_ENABLE) | 1220 | if (tmp & SDVO_AUDIO_ENABLE) |
@@ -1439,7 +1435,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder, | |||
1439 | intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); | 1435 | intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); |
1440 | } | 1436 | } |
1441 | 1437 | ||
1442 | intel_dig_port->set_infoframes(&encoder->base, false, | 1438 | intel_dig_port->set_infoframes(encoder, |
1439 | false, | ||
1443 | old_crtc_state, old_conn_state); | 1440 | old_crtc_state, old_conn_state); |
1444 | 1441 | ||
1445 | intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); | 1442 | intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); |
@@ -1598,6 +1595,8 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, | |||
1598 | struct drm_atomic_state *state = crtc_state->base.state; | 1595 | struct drm_atomic_state *state = crtc_state->base.state; |
1599 | struct drm_connector_state *connector_state; | 1596 | struct drm_connector_state *connector_state; |
1600 | struct drm_connector *connector; | 1597 | struct drm_connector *connector; |
1598 | const struct drm_display_mode *adjusted_mode = | ||
1599 | &crtc_state->base.adjusted_mode; | ||
1601 | int i; | 1600 | int i; |
1602 | 1601 | ||
1603 | if (HAS_GMCH_DISPLAY(dev_priv)) | 1602 | if (HAS_GMCH_DISPLAY(dev_priv)) |
@@ -1625,7 +1624,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, | |||
1625 | if (connector_state->crtc != crtc_state->base.crtc) | 1624 | if (connector_state->crtc != crtc_state->base.crtc) |
1626 | continue; | 1625 | continue; |
1627 | 1626 | ||
1628 | if (crtc_state->ycbcr420) { | 1627 | if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { |
1629 | const struct drm_hdmi_info *hdmi = &info->hdmi; | 1628 | const struct drm_hdmi_info *hdmi = &info->hdmi; |
1630 | 1629 | ||
1631 | if (bpc == 12 && !(hdmi->y420_dc_modes & | 1630 | if (bpc == 12 && !(hdmi->y420_dc_modes & |
@@ -1646,7 +1645,14 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, | |||
1646 | 1645 | ||
1647 | /* Display WA #1139: glk */ | 1646 | /* Display WA #1139: glk */ |
1648 | if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) && | 1647 | if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) && |
1649 | crtc_state->base.adjusted_mode.htotal > 5460) | 1648 | adjusted_mode->htotal > 5460) |
1649 | return false; | ||
1650 | |||
1651 | /* Display Wa_1405510057:icl */ | ||
1652 | if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && | ||
1653 | bpc == 10 && IS_ICELAKE(dev_priv) && | ||
1654 | (adjusted_mode->crtc_hblank_end - | ||
1655 | adjusted_mode->crtc_hblank_start) % 8 == 2) | ||
1650 | return false; | 1656 | return false; |
1651 | 1657 | ||
1652 | return true; | 1658 | return true; |
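The new icl workaround above keys purely off the hblank width of the adjusted mode. Below is a standalone C sketch of that check; the timing values are illustrative stand-ins for the real crtc_hblank_start/end fields, not taken from the driver.

#include <stdbool.h>
#include <stdio.h>

/*
 * Model of Display Wa_1405510057: for 10 bpc YCbCr 4:2:0 output on
 * Icelake, deep color must be rejected when the hblank size modulo
 * 8 equals 2.
 */
int main(void)
{
	int crtc_hblank_start = 3840, crtc_hblank_end = 4058; /* example */
	int hblank = crtc_hblank_end - crtc_hblank_start;
	bool wa_blocks = (hblank % 8) == 2;

	printf("hblank %d -> deep color %s\n", hblank,
	       wa_blocks ? "blocked" : "allowed");
	return 0;
}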
@@ -1670,7 +1676,7 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector, | |||
1670 | *clock_12bpc /= 2; | 1676 | *clock_12bpc /= 2; |
1671 | *clock_10bpc /= 2; | 1677 | *clock_10bpc /= 2; |
1672 | *clock_8bpc /= 2; | 1678 | *clock_8bpc /= 2; |
1673 | config->ycbcr420 = true; | 1679 | config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; |
1674 | 1680 | ||
1675 | /* YCBCR 420 output conversion needs a scaler */ | 1681 | /* YCBCR 420 output conversion needs a scaler */ |
1676 | if (skl_update_scaler_crtc(config)) { | 1682 | if (skl_update_scaler_crtc(config)) { |
@@ -1704,6 +1710,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, | |||
1704 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) | 1710 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) |
1705 | return false; | 1711 | return false; |
1706 | 1712 | ||
1713 | pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; | ||
1707 | pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink; | 1714 | pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink; |
1708 | 1715 | ||
1709 | if (pipe_config->has_hdmi_sink) | 1716 | if (pipe_config->has_hdmi_sink) |
@@ -1974,7 +1981,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder, | |||
1974 | 1981 | ||
1975 | intel_hdmi_prepare(encoder, pipe_config); | 1982 | intel_hdmi_prepare(encoder, pipe_config); |
1976 | 1983 | ||
1977 | intel_dig_port->set_infoframes(&encoder->base, | 1984 | intel_dig_port->set_infoframes(encoder, |
1978 | pipe_config->has_infoframe, | 1985 | pipe_config->has_infoframe, |
1979 | pipe_config, conn_state); | 1986 | pipe_config, conn_state); |
1980 | } | 1987 | } |
@@ -1992,7 +1999,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder, | |||
1992 | vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a, | 1999 | vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a, |
1993 | 0x2b247878); | 2000 | 0x2b247878); |
1994 | 2001 | ||
1995 | dport->set_infoframes(&encoder->base, | 2002 | dport->set_infoframes(encoder, |
1996 | pipe_config->has_infoframe, | 2003 | pipe_config->has_infoframe, |
1997 | pipe_config, conn_state); | 2004 | pipe_config, conn_state); |
1998 | 2005 | ||
@@ -2063,7 +2070,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder, | |||
2063 | /* Use 800mV-0dB */ | 2070 | /* Use 800mV-0dB */ |
2064 | chv_set_phy_signal_level(encoder, 128, 102, false); | 2071 | chv_set_phy_signal_level(encoder, 128, 102, false); |
2065 | 2072 | ||
2066 | dport->set_infoframes(&encoder->base, | 2073 | dport->set_infoframes(encoder, |
2067 | pipe_config->has_infoframe, | 2074 | pipe_config->has_infoframe, |
2068 | pipe_config, conn_state); | 2075 | pipe_config, conn_state); |
2069 | 2076 | ||
@@ -2075,13 +2082,26 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder, | |||
2075 | chv_phy_release_cl2_override(encoder); | 2082 | chv_phy_release_cl2_override(encoder); |
2076 | } | 2083 | } |
2077 | 2084 | ||
2085 | static int | ||
2086 | intel_hdmi_connector_register(struct drm_connector *connector) | ||
2087 | { | ||
2088 | int ret; | ||
2089 | |||
2090 | ret = intel_connector_register(connector); | ||
2091 | if (ret) | ||
2092 | return ret; | ||
2093 | |||
2094 | i915_debugfs_connector_add(connector); | ||
2095 | |||
2096 | return ret; | ||
2097 | } | ||
2098 | |||
2078 | static void intel_hdmi_destroy(struct drm_connector *connector) | 2099 | static void intel_hdmi_destroy(struct drm_connector *connector) |
2079 | { | 2100 | { |
2080 | if (intel_attached_hdmi(connector)->cec_notifier) | 2101 | if (intel_attached_hdmi(connector)->cec_notifier) |
2081 | cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier); | 2102 | cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier); |
2082 | kfree(to_intel_connector(connector)->detect_edid); | 2103 | |
2083 | drm_connector_cleanup(connector); | 2104 | intel_connector_destroy(connector); |
2084 | kfree(connector); | ||
2085 | } | 2105 | } |
2086 | 2106 | ||
2087 | static const struct drm_connector_funcs intel_hdmi_connector_funcs = { | 2107 | static const struct drm_connector_funcs intel_hdmi_connector_funcs = { |
@@ -2090,7 +2110,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { | |||
2090 | .fill_modes = drm_helper_probe_single_connector_modes, | 2110 | .fill_modes = drm_helper_probe_single_connector_modes, |
2091 | .atomic_get_property = intel_digital_connector_atomic_get_property, | 2111 | .atomic_get_property = intel_digital_connector_atomic_get_property, |
2092 | .atomic_set_property = intel_digital_connector_atomic_set_property, | 2112 | .atomic_set_property = intel_digital_connector_atomic_set_property, |
2093 | .late_register = intel_connector_register, | 2113 | .late_register = intel_hdmi_connector_register, |
2094 | .early_unregister = intel_connector_unregister, | 2114 | .early_unregister = intel_connector_unregister, |
2095 | .destroy = intel_hdmi_destroy, | 2115 | .destroy = intel_hdmi_destroy, |
2096 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | 2116 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, |
@@ -2110,11 +2130,16 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { | |||
2110 | static void | 2130 | static void |
2111 | intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) | 2131 | intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) |
2112 | { | 2132 | { |
2133 | struct drm_i915_private *dev_priv = to_i915(connector->dev); | ||
2134 | |||
2113 | intel_attach_force_audio_property(connector); | 2135 | intel_attach_force_audio_property(connector); |
2114 | intel_attach_broadcast_rgb_property(connector); | 2136 | intel_attach_broadcast_rgb_property(connector); |
2115 | intel_attach_aspect_ratio_property(connector); | 2137 | intel_attach_aspect_ratio_property(connector); |
2116 | drm_connector_attach_content_type_property(connector); | 2138 | drm_connector_attach_content_type_property(connector); |
2117 | connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE; | 2139 | connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE; |
2140 | |||
2141 | if (!HAS_GMCH_DISPLAY(dev_priv)) | ||
2142 | drm_connector_attach_max_bpc_property(connector, 8, 12); | ||
2118 | } | 2143 | } |
2119 | 2144 | ||
2120 | /* | 2145 | /* |
@@ -2325,9 +2350,18 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port) | |||
2325 | intel_dig_port->set_infoframes = g4x_set_infoframes; | 2350 | intel_dig_port->set_infoframes = g4x_set_infoframes; |
2326 | intel_dig_port->infoframe_enabled = g4x_infoframe_enabled; | 2351 | intel_dig_port->infoframe_enabled = g4x_infoframe_enabled; |
2327 | } else if (HAS_DDI(dev_priv)) { | 2352 | } else if (HAS_DDI(dev_priv)) { |
2328 | intel_dig_port->write_infoframe = hsw_write_infoframe; | 2353 | if (intel_dig_port->lspcon.active) { |
2329 | intel_dig_port->set_infoframes = hsw_set_infoframes; | 2354 | intel_dig_port->write_infoframe = |
2330 | intel_dig_port->infoframe_enabled = hsw_infoframe_enabled; | 2355 | lspcon_write_infoframe; |
2356 | intel_dig_port->set_infoframes = lspcon_set_infoframes; | ||
2357 | intel_dig_port->infoframe_enabled = | ||
2358 | lspcon_infoframe_enabled; | ||
2359 | } else { | ||
2360 | intel_dig_port->set_infoframes = hsw_set_infoframes; | ||
2361 | intel_dig_port->infoframe_enabled = | ||
2362 | hsw_infoframe_enabled; | ||
2363 | intel_dig_port->write_infoframe = hsw_write_infoframe; | ||
2364 | } | ||
2331 | } else if (HAS_PCH_IBX(dev_priv)) { | 2365 | } else if (HAS_PCH_IBX(dev_priv)) { |
2332 | intel_dig_port->write_infoframe = ibx_write_infoframe; | 2366 | intel_dig_port->write_infoframe = ibx_write_infoframe; |
2333 | intel_dig_port->set_infoframes = ibx_set_infoframes; | 2367 | intel_dig_port->set_infoframes = ibx_set_infoframes; |
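The hunk above extends the HAS_DDI() branch so an active LSPCON swaps in its own infoframe hooks instead of the HSW ones. A minimal sketch of this init-time dispatch pattern follows; the function names are hypothetical stand-ins for the driver's vfuncs.

#include <stdio.h>

/*
 * Pick infoframe hooks once at init time; callers then go through the
 * function pointer without caring whether an LSPCON is present.
 */
struct dig_port {
	void (*write_infoframe)(const char *what);
};

static void hsw_write(const char *w)    { printf("hsw: %s\n", w); }
static void lspcon_write(const char *w) { printf("lspcon: %s\n", w); }

static void infoframe_init(struct dig_port *p, int lspcon_active)
{
	p->write_infoframe = lspcon_active ? lspcon_write : hsw_write;
}

int main(void)
{
	struct dig_port p;

	infoframe_init(&p, 1);
	p.write_infoframe("AVI");	/* dispatches to lspcon_write */
	return 0;
}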
@@ -2486,5 +2520,6 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, | |||
2486 | 2520 | ||
2487 | intel_infoframe_init(intel_dig_port); | 2521 | intel_infoframe_init(intel_dig_port); |
2488 | 2522 | ||
2523 | intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); | ||
2489 | intel_hdmi_init_connector(intel_dig_port, intel_connector); | 2524 | intel_hdmi_init_connector(intel_dig_port, intel_connector); |
2490 | } | 2525 | } |
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 648a13c6043c..e24174d08fed 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c | |||
@@ -114,51 +114,68 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv, | |||
114 | #define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000) | 114 | #define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000) |
115 | 115 | ||
116 | /** | 116 | /** |
117 | * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin | 117 | * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin |
118 | * @dev_priv: private driver data pointer | 118 | * @dev_priv: private driver data pointer |
119 | * @pin: the pin to gather stats on | 119 | * @pin: the pin to gather stats on |
120 | * @long_hpd: whether the HPD IRQ was long or short | ||
120 | * | 121 | * |
121 | * Gather stats about HPD irqs from the specified @pin, and detect irq | 122 | * Gather stats about HPD IRQs from the specified @pin, and detect IRQ |
122 | * storms. Only the pin specific stats and state are changed, the caller is | 123 | * storms. Only the pin specific stats and state are changed, the caller is |
123 | * responsible for further action. | 124 | * responsible for further action. |
124 | * | 125 | * |
125 | * The number of irqs that are allowed within @HPD_STORM_DETECT_PERIOD is | 126 | * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is |
126 | * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to | 127 | * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to |
127 | * @HPD_STORM_DEFAULT_THRESHOLD. If this threshold is exceeded, it's | 128 | * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and |
128 | * considered an irq storm and the irq state is set to @HPD_MARK_DISABLED. | 129 | * short IRQs count as +1. If this threshold is exceeded, it's considered an |
130 | * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED. | ||
131 | * | ||
132 | * By default, most systems will only count long IRQs towards | ||
133 | * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also | ||
134 | * suffer from short IRQ storms and must also track these. Because short IRQ | ||
135 | * storms are naturally caused by sideband interactions with DP MST devices, | ||
136 | * short IRQ detection is only enabled for systems without DP MST support. | ||
137 | * Systems which are new enough to support DP MST are far less likely to | ||
138 | * suffer from IRQ storms at all, so this is fine. | ||
129 | * | 139 | * |
130 | * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs, | 140 | * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs, |
131 | * and should only be adjusted for automated hotplug testing. | 141 | * and should only be adjusted for automated hotplug testing. |
132 | * | 142 | * |
133 | * Return true if an irq storm was detected on @pin. | 143 | * Return true if an IRQ storm was detected on @pin. |
134 | */ | 144 | */ |
135 | static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, | 145 | static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, |
136 | enum hpd_pin pin) | 146 | enum hpd_pin pin, bool long_hpd) |
137 | { | 147 | { |
138 | unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies; | 148 | struct i915_hotplug *hpd = &dev_priv->hotplug; |
149 | unsigned long start = hpd->stats[pin].last_jiffies; | ||
139 | unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD); | 150 | unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD); |
140 | const int threshold = dev_priv->hotplug.hpd_storm_threshold; | 151 | const int increment = long_hpd ? 10 : 1; |
152 | const int threshold = hpd->hpd_storm_threshold; | ||
141 | bool storm = false; | 153 | bool storm = false; |
142 | 154 | ||
155 | if (!threshold || | ||
156 | (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled)) | ||
157 | return false; | ||
158 | |||
143 | if (!time_in_range(jiffies, start, end)) { | 159 | if (!time_in_range(jiffies, start, end)) { |
144 | dev_priv->hotplug.stats[pin].last_jiffies = jiffies; | 160 | hpd->stats[pin].last_jiffies = jiffies; |
145 | dev_priv->hotplug.stats[pin].count = 0; | 161 | hpd->stats[pin].count = 0; |
146 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin); | 162 | } |
147 | } else if (dev_priv->hotplug.stats[pin].count > threshold && | 163 | |
148 | threshold) { | 164 | hpd->stats[pin].count += increment; |
149 | dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED; | 165 | if (hpd->stats[pin].count > threshold) { |
166 | hpd->stats[pin].state = HPD_MARK_DISABLED; | ||
150 | DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin); | 167 | DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin); |
151 | storm = true; | 168 | storm = true; |
152 | } else { | 169 | } else { |
153 | dev_priv->hotplug.stats[pin].count++; | ||
154 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin, | 170 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin, |
155 | dev_priv->hotplug.stats[pin].count); | 171 | hpd->stats[pin].count); |
156 | } | 172 | } |
157 | 173 | ||
158 | return storm; | 174 | return storm; |
159 | } | 175 | } |
160 | 176 | ||
161 | static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) | 177 | static void |
178 | intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) | ||
162 | { | 179 | { |
163 | struct drm_device *dev = &dev_priv->drm; | 180 | struct drm_device *dev = &dev_priv->drm; |
164 | struct intel_connector *intel_connector; | 181 | struct intel_connector *intel_connector; |
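The new weighting is easiest to see in isolation. The sketch below is a standalone C model of the accounting the kernel-doc above describes (per-window reset, +10 for a long pulse, +1 for a short one); the constants are illustrative stand-ins, and plain millisecond counters replace the driver's jiffies arithmetic.

#include <stdbool.h>
#include <stdio.h>

#define STORM_PERIOD_MS  1000	/* stand-in for HPD_STORM_DETECT_PERIOD */
#define STORM_THRESHOLD  50	/* stand-in for hpd_storm_threshold */

struct pin_stats {
	unsigned long window_start_ms;
	int count;
};

/* Long pulses weigh 10 against the threshold, short pulses weigh 1. */
static bool storm_detect(struct pin_stats *s, unsigned long now_ms,
			 bool long_hpd, bool short_storm_enabled)
{
	if (!long_hpd && !short_storm_enabled)
		return false;	/* short storms tracked only on request */

	if (now_ms - s->window_start_ms > STORM_PERIOD_MS) {
		s->window_start_ms = now_ms;	/* open a new window */
		s->count = 0;
	}

	s->count += long_hpd ? 10 : 1;
	return s->count > STORM_THRESHOLD;
}

int main(void)
{
	struct pin_stats s = { 0, 0 };
	unsigned long t;

	/* Six long pulses inside one window exceed a threshold of 50. */
	for (t = 0; t < 6; t++)
		if (storm_detect(&s, t, true, false))
			printf("storm at pulse %lu\n", t);
	return 0;
}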
@@ -228,7 +245,9 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) | |||
228 | drm_for_each_connector_iter(connector, &conn_iter) { | 245 | drm_for_each_connector_iter(connector, &conn_iter) { |
229 | struct intel_connector *intel_connector = to_intel_connector(connector); | 246 | struct intel_connector *intel_connector = to_intel_connector(connector); |
230 | 247 | ||
231 | if (intel_connector->encoder->hpd_pin == pin) { | 248 | /* Don't check MST ports, they don't have pins */ |
249 | if (!intel_connector->mst_port && | ||
250 | intel_connector->encoder->hpd_pin == pin) { | ||
232 | if (connector->polled != intel_connector->polled) | 251 | if (connector->polled != intel_connector->polled) |
233 | DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", | 252 | DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", |
234 | connector->name); | 253 | connector->name); |
@@ -346,8 +365,8 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
346 | hpd_event_bits = dev_priv->hotplug.event_bits; | 365 | hpd_event_bits = dev_priv->hotplug.event_bits; |
347 | dev_priv->hotplug.event_bits = 0; | 366 | dev_priv->hotplug.event_bits = 0; |
348 | 367 | ||
349 | /* Disable hotplug on connectors that hit an irq storm. */ | 368 | /* Enable polling for connectors which had HPD IRQ storms */ |
350 | intel_hpd_irq_storm_disable(dev_priv); | 369 | intel_hpd_irq_storm_switch_to_polling(dev_priv); |
351 | 370 | ||
352 | spin_unlock_irq(&dev_priv->irq_lock); | 371 | spin_unlock_irq(&dev_priv->irq_lock); |
353 | 372 | ||
@@ -395,37 +414,54 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, | |||
395 | struct intel_encoder *encoder; | 414 | struct intel_encoder *encoder; |
396 | bool storm_detected = false; | 415 | bool storm_detected = false; |
397 | bool queue_dig = false, queue_hp = false; | 416 | bool queue_dig = false, queue_hp = false; |
417 | u32 long_hpd_pulse_mask = 0; | ||
418 | u32 short_hpd_pulse_mask = 0; | ||
419 | enum hpd_pin pin; | ||
398 | 420 | ||
399 | if (!pin_mask) | 421 | if (!pin_mask) |
400 | return; | 422 | return; |
401 | 423 | ||
402 | spin_lock(&dev_priv->irq_lock); | 424 | spin_lock(&dev_priv->irq_lock); |
425 | |||
426 | /* | ||
427 | * Determine whether ->hpd_pulse() exists for each pin, and | ||
428 | * whether we have a short or a long pulse. This is needed | ||
429 | * as each pin may have up to two encoders (HDMI and DP) and | ||
430 | * only one of them (DP) will have ->hpd_pulse(). | ||
431 | */ | ||
403 | for_each_intel_encoder(&dev_priv->drm, encoder) { | 432 | for_each_intel_encoder(&dev_priv->drm, encoder) { |
404 | enum hpd_pin pin = encoder->hpd_pin; | ||
405 | bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder); | 433 | bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder); |
434 | enum port port = encoder->port; | ||
435 | bool long_hpd; | ||
406 | 436 | ||
437 | pin = encoder->hpd_pin; | ||
407 | if (!(BIT(pin) & pin_mask)) | 438 | if (!(BIT(pin) & pin_mask)) |
408 | continue; | 439 | continue; |
409 | 440 | ||
410 | if (has_hpd_pulse) { | 441 | if (!has_hpd_pulse) |
411 | bool long_hpd = long_mask & BIT(pin); | 442 | continue; |
412 | enum port port = encoder->port; | ||
413 | 443 | ||
414 | DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), | 444 | long_hpd = long_mask & BIT(pin); |
415 | long_hpd ? "long" : "short"); | 445 | |
416 | /* | 446 | DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), |
417 | * For long HPD pulses we want to have the digital queue happen, | 447 | long_hpd ? "long" : "short"); |
418 | * but we still want HPD storm detection to function. | 448 | queue_dig = true; |
419 | */ | 449 | |
420 | queue_dig = true; | 450 | if (long_hpd) { |
421 | if (long_hpd) { | 451 | long_hpd_pulse_mask |= BIT(pin); |
422 | dev_priv->hotplug.long_port_mask |= (1 << port); | 452 | dev_priv->hotplug.long_port_mask |= BIT(port); |
423 | } else { | 453 | } else { |
424 | /* for short HPD just trigger the digital queue */ | 454 | short_hpd_pulse_mask |= BIT(pin); |
425 | dev_priv->hotplug.short_port_mask |= (1 << port); | 455 | dev_priv->hotplug.short_port_mask |= BIT(port); |
426 | continue; | ||
427 | } | ||
428 | } | 456 | } |
457 | } | ||
458 | |||
459 | /* Now process each pin just once */ | ||
460 | for_each_hpd_pin(pin) { | ||
461 | bool long_hpd; | ||
462 | |||
463 | if (!(BIT(pin) & pin_mask)) | ||
464 | continue; | ||
429 | 465 | ||
430 | if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) { | 466 | if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) { |
431 | /* | 467 | /* |
@@ -442,17 +478,30 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, | |||
442 | if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED) | 478 | if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED) |
443 | continue; | 479 | continue; |
444 | 480 | ||
445 | if (!has_hpd_pulse) { | 481 | /* |
482 | * Delegate to ->hpd_pulse() if one of the encoders for this | ||
483 | * pin has it, otherwise let the hotplug_work deal with this | ||
484 | * pin directly. | ||
485 | */ | ||
486 | if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) { | ||
487 | long_hpd = long_hpd_pulse_mask & BIT(pin); | ||
488 | } else { | ||
446 | dev_priv->hotplug.event_bits |= BIT(pin); | 489 | dev_priv->hotplug.event_bits |= BIT(pin); |
490 | long_hpd = true; | ||
447 | queue_hp = true; | 491 | queue_hp = true; |
448 | } | 492 | } |
449 | 493 | ||
450 | if (intel_hpd_irq_storm_detect(dev_priv, pin)) { | 494 | if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) { |
451 | dev_priv->hotplug.event_bits &= ~BIT(pin); | 495 | dev_priv->hotplug.event_bits &= ~BIT(pin); |
452 | storm_detected = true; | 496 | storm_detected = true; |
497 | queue_hp = true; | ||
453 | } | 498 | } |
454 | } | 499 | } |
455 | 500 | ||
501 | /* | ||
502 | * Disable any IRQs that storms were detected on. Polling enablement | ||
503 | * happens later in our hotplug work. | ||
504 | */ | ||
456 | if (storm_detected && dev_priv->display_irqs_enabled) | 505 | if (storm_detected && dev_priv->display_irqs_enabled) |
457 | dev_priv->display.hpd_irq_setup(dev_priv); | 506 | dev_priv->display.hpd_irq_setup(dev_priv); |
458 | spin_unlock(&dev_priv->irq_lock); | 507 | spin_unlock(&dev_priv->irq_lock); |
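The reworked handler now runs two passes: one over encoders to classify each pin's pulse as long or short, and one over pins so every pin is processed exactly once. A standalone sketch of that split using plain bitmasks; the mask values are made-up inputs, and "no ->hpd_pulse()" pins default to long, as in the hunk above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PINS 8

int main(void)
{
	uint32_t pin_mask = 0x5, long_mask = 0x4;	/* example IRQ input */
	uint32_t has_pulse = 0x4;			/* pins with ->hpd_pulse() */
	uint32_t long_pulse = 0, short_pulse = 0;
	int pin;

	/* Pass 1: classify pulses for pins that have a pulse handler. */
	for (pin = 0; pin < MAX_PINS; pin++) {
		if (!(pin_mask & (1u << pin)) || !(has_pulse & (1u << pin)))
			continue;
		if (long_mask & (1u << pin))
			long_pulse |= 1u << pin;
		else
			short_pulse |= 1u << pin;
	}

	/* Pass 2: act on each pin exactly once. */
	for (pin = 0; pin < MAX_PINS; pin++) {
		bool long_hpd;

		if (!(pin_mask & (1u << pin)))
			continue;
		if ((long_pulse | short_pulse) & (1u << pin))
			long_hpd = long_pulse & (1u << pin);
		else
			long_hpd = true;	/* no handler: treat as long */
		printf("pin %d: %s\n", pin, long_hpd ? "long" : "short");
	}
	return 0;
}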
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index 37ef540dd280..bc27b691d824 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c | |||
@@ -108,13 +108,14 @@ fail: | |||
108 | * This function reads status register to verify if HuC | 108 | * This function reads status register to verify if HuC |
109 | * firmware was successfully loaded. | 109 | * firmware was successfully loaded. |
110 | * | 110 | * |
111 | * Returns positive value if HuC firmware is loaded and verified | 111 | * Returns: 1 if HuC firmware is loaded and verified, |
112 | * and -ENODEV if HuC is not present. | 112 | * 0 if HuC firmware is not loaded and -ENODEV if HuC |
113 | * is not present on this platform. | ||
113 | */ | 114 | */ |
114 | int intel_huc_check_status(struct intel_huc *huc) | 115 | int intel_huc_check_status(struct intel_huc *huc) |
115 | { | 116 | { |
116 | struct drm_i915_private *dev_priv = huc_to_i915(huc); | 117 | struct drm_i915_private *dev_priv = huc_to_i915(huc); |
117 | u32 status; | 118 | bool status; |
118 | 119 | ||
119 | if (!HAS_HUC(dev_priv)) | 120 | if (!HAS_HUC(dev_priv)) |
120 | return -ENODEV; | 121 | return -ENODEV; |
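The clarified kernel-doc above distinguishes three outcomes. A tiny model of how a caller would consume them; the helper here is hypothetical, not the driver function.

#include <errno.h>
#include <stdio.h>

/* 1: loaded and verified, 0: not loaded, -ENODEV: no HuC at all. */
static int check_status(int has_huc, int fw_verified)
{
	if (!has_huc)
		return -ENODEV;
	return fw_verified ? 1 : 0;
}

int main(void)
{
	printf("%d %d %d\n", check_status(1, 1), check_status(1, 0),
	       check_status(0, 0));
	return 0;
}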
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c index cdf19553ffac..5d5336fbe7b0 100644 --- a/drivers/gpu/drm/i915/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c | |||
@@ -297,8 +297,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) | |||
297 | lpe_audio_platdev_destroy(dev_priv); | 297 | lpe_audio_platdev_destroy(dev_priv); |
298 | 298 | ||
299 | irq_free_desc(dev_priv->lpe_audio.irq); | 299 | irq_free_desc(dev_priv->lpe_audio.irq); |
300 | } | ||
301 | 300 | ||
301 | dev_priv->lpe_audio.irq = -1; | ||
302 | dev_priv->lpe_audio.platdev = NULL; | ||
303 | } | ||
302 | 304 | ||
303 | /** | 305 | /** |
304 | * intel_lpe_audio_notify() - notify lpe audio event | 306 | * intel_lpe_audio_notify() - notify lpe audio event |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 43957bb37a42..08fd9b12e4d7 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -259,63 +259,6 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx, | |||
259 | ce->lrc_desc = desc; | 259 | ce->lrc_desc = desc; |
260 | } | 260 | } |
261 | 261 | ||
262 | static struct i915_priolist * | ||
263 | lookup_priolist(struct intel_engine_cs *engine, int prio) | ||
264 | { | ||
265 | struct intel_engine_execlists * const execlists = &engine->execlists; | ||
266 | struct i915_priolist *p; | ||
267 | struct rb_node **parent, *rb; | ||
268 | bool first = true; | ||
269 | |||
270 | if (unlikely(execlists->no_priolist)) | ||
271 | prio = I915_PRIORITY_NORMAL; | ||
272 | |||
273 | find_priolist: | ||
274 | /* most positive priority is scheduled first, equal priorities fifo */ | ||
275 | rb = NULL; | ||
276 | parent = &execlists->queue.rb_root.rb_node; | ||
277 | while (*parent) { | ||
278 | rb = *parent; | ||
279 | p = to_priolist(rb); | ||
280 | if (prio > p->priority) { | ||
281 | parent = &rb->rb_left; | ||
282 | } else if (prio < p->priority) { | ||
283 | parent = &rb->rb_right; | ||
284 | first = false; | ||
285 | } else { | ||
286 | return p; | ||
287 | } | ||
288 | } | ||
289 | |||
290 | if (prio == I915_PRIORITY_NORMAL) { | ||
291 | p = &execlists->default_priolist; | ||
292 | } else { | ||
293 | p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC); | ||
294 | /* Convert an allocation failure to a priority bump */ | ||
295 | if (unlikely(!p)) { | ||
296 | prio = I915_PRIORITY_NORMAL; /* recurses just once */ | ||
297 | |||
298 | /* To maintain ordering with all rendering, after an | ||
299 | * allocation failure we have to disable all scheduling. | ||
300 | * Requests will then be executed in fifo, and schedule | ||
301 | * will ensure that dependencies are emitted in fifo. | ||
302 | * There will be still some reordering with existing | ||
303 | * requests, so if userspace lied about their | ||
304 | * dependencies that reordering may be visible. | ||
305 | */ | ||
306 | execlists->no_priolist = true; | ||
307 | goto find_priolist; | ||
308 | } | ||
309 | } | ||
310 | |||
311 | p->priority = prio; | ||
312 | INIT_LIST_HEAD(&p->requests); | ||
313 | rb_link_node(&p->node, rb, parent); | ||
314 | rb_insert_color_cached(&p->node, &execlists->queue, first); | ||
315 | |||
316 | return p; | ||
317 | } | ||
318 | |||
319 | static void unwind_wa_tail(struct i915_request *rq) | 262 | static void unwind_wa_tail(struct i915_request *rq) |
320 | { | 263 | { |
321 | rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES); | 264 | rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES); |
@@ -324,9 +267,9 @@ static void unwind_wa_tail(struct i915_request *rq) | |||
324 | 267 | ||
325 | static void __unwind_incomplete_requests(struct intel_engine_cs *engine) | 268 | static void __unwind_incomplete_requests(struct intel_engine_cs *engine) |
326 | { | 269 | { |
327 | struct i915_request *rq, *rn; | 270 | struct i915_request *rq, *rn, *active = NULL; |
328 | struct i915_priolist *uninitialized_var(p); | 271 | struct list_head *uninitialized_var(pl); |
329 | int last_prio = I915_PRIORITY_INVALID; | 272 | int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT; |
330 | 273 | ||
331 | lockdep_assert_held(&engine->timeline.lock); | 274 | lockdep_assert_held(&engine->timeline.lock); |
332 | 275 | ||
@@ -334,19 +277,34 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine) | |||
334 | &engine->timeline.requests, | 277 | &engine->timeline.requests, |
335 | link) { | 278 | link) { |
336 | if (i915_request_completed(rq)) | 279 | if (i915_request_completed(rq)) |
337 | return; | 280 | break; |
338 | 281 | ||
339 | __i915_request_unsubmit(rq); | 282 | __i915_request_unsubmit(rq); |
340 | unwind_wa_tail(rq); | 283 | unwind_wa_tail(rq); |
341 | 284 | ||
285 | GEM_BUG_ON(rq->hw_context->active); | ||
286 | |||
342 | GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID); | 287 | GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID); |
343 | if (rq_prio(rq) != last_prio) { | 288 | if (rq_prio(rq) != prio) { |
344 | last_prio = rq_prio(rq); | 289 | prio = rq_prio(rq); |
345 | p = lookup_priolist(engine, last_prio); | 290 | pl = i915_sched_lookup_priolist(engine, prio); |
346 | } | 291 | } |
292 | GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); | ||
293 | |||
294 | list_add(&rq->sched.link, pl); | ||
347 | 295 | ||
348 | GEM_BUG_ON(p->priority != rq_prio(rq)); | 296 | active = rq; |
349 | list_add(&rq->sched.link, &p->requests); | 297 | } |
298 | |||
299 | /* | ||
300 | * The active request is now effectively the start of a new client | ||
301 | * stream, so give it the equivalent small priority bump to prevent | ||
302 | * it being gazumped a second time by another peer. | ||
303 | */ | ||
304 | if (!(prio & I915_PRIORITY_NEWCLIENT)) { | ||
305 | prio |= I915_PRIORITY_NEWCLIENT; | ||
306 | list_move_tail(&active->sched.link, | ||
307 | i915_sched_lookup_priolist(engine, prio)); | ||
350 | } | 308 | } |
351 | } | 309 | } |
352 | 310 | ||
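The unwind path now re-queues the previously running request with a small "new client" bonus so an equal-priority peer cannot gazump it a second time. Below is a sketch of how such a low-order bonus bit orders otherwise equal priorities; the bit layout is illustrative, not the driver's actual priority encoding.

#include <stdio.h>

#define PRIORITY_SHIFT      1
#define PRIORITY_NEWCLIENT  (1 << 0)	/* low-order bonus bit */

/* Effective priority: user priority in the high bits, bonus below. */
static int effective_prio(int user_prio, int bonus)
{
	return (user_prio << PRIORITY_SHIFT) | bonus;
}

int main(void)
{
	int peer = effective_prio(0, 0);
	int unwound = effective_prio(0, PRIORITY_NEWCLIENT);

	/*
	 * The unwound (previously running) request sorts ahead of an
	 * equal-priority peer, so it cannot be preempted twice.
	 */
	printf("unwound %s peer\n", unwound > peer ? "beats" : "ties");
	return 0;
}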
@@ -355,13 +313,8 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists) | |||
355 | { | 313 | { |
356 | struct intel_engine_cs *engine = | 314 | struct intel_engine_cs *engine = |
357 | container_of(execlists, typeof(*engine), execlists); | 315 | container_of(execlists, typeof(*engine), execlists); |
358 | unsigned long flags; | ||
359 | |||
360 | spin_lock_irqsave(&engine->timeline.lock, flags); | ||
361 | 316 | ||
362 | __unwind_incomplete_requests(engine); | 317 | __unwind_incomplete_requests(engine); |
363 | |||
364 | spin_unlock_irqrestore(&engine->timeline.lock, flags); | ||
365 | } | 318 | } |
366 | 319 | ||
367 | static inline void | 320 | static inline void |
@@ -394,13 +347,17 @@ execlists_user_end(struct intel_engine_execlists *execlists) | |||
394 | static inline void | 347 | static inline void |
395 | execlists_context_schedule_in(struct i915_request *rq) | 348 | execlists_context_schedule_in(struct i915_request *rq) |
396 | { | 349 | { |
350 | GEM_BUG_ON(rq->hw_context->active); | ||
351 | |||
397 | execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); | 352 | execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); |
398 | intel_engine_context_in(rq->engine); | 353 | intel_engine_context_in(rq->engine); |
354 | rq->hw_context->active = rq->engine; | ||
399 | } | 355 | } |
400 | 356 | ||
401 | static inline void | 357 | static inline void |
402 | execlists_context_schedule_out(struct i915_request *rq, unsigned long status) | 358 | execlists_context_schedule_out(struct i915_request *rq, unsigned long status) |
403 | { | 359 | { |
360 | rq->hw_context->active = NULL; | ||
404 | intel_engine_context_out(rq->engine); | 361 | intel_engine_context_out(rq->engine); |
405 | execlists_context_status_change(rq, status); | 362 | execlists_context_status_change(rq, status); |
406 | trace_i915_request_out(rq); | 363 | trace_i915_request_out(rq); |
@@ -417,21 +374,32 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state) | |||
417 | 374 | ||
418 | static u64 execlists_update_context(struct i915_request *rq) | 375 | static u64 execlists_update_context(struct i915_request *rq) |
419 | { | 376 | { |
377 | struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt; | ||
420 | struct intel_context *ce = rq->hw_context; | 378 | struct intel_context *ce = rq->hw_context; |
421 | struct i915_hw_ppgtt *ppgtt = | ||
422 | rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt; | ||
423 | u32 *reg_state = ce->lrc_reg_state; | 379 | u32 *reg_state = ce->lrc_reg_state; |
424 | 380 | ||
425 | reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail); | 381 | reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail); |
426 | 382 | ||
427 | /* True 32b PPGTT with dynamic page allocation: update PDP | 383 | /* |
384 | * True 32b PPGTT with dynamic page allocation: update PDP | ||
428 | * registers and point the unallocated PDPs to scratch page. | 385 | * registers and point the unallocated PDPs to scratch page. |
429 | * PML4 is allocated during ppgtt init, so this is not needed | 386 | * PML4 is allocated during ppgtt init, so this is not needed |
430 | * in 48-bit mode. | 387 | * in 48-bit mode. |
431 | */ | 388 | */ |
432 | if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm)) | 389 | if (!i915_vm_is_48bit(&ppgtt->vm)) |
433 | execlists_update_context_pdps(ppgtt, reg_state); | 390 | execlists_update_context_pdps(ppgtt, reg_state); |
434 | 391 | ||
392 | /* | ||
393 | * Make sure the context image is complete before we submit it to HW. | ||
394 | * | ||
395 | * Ostensibly, writes (including the WCB) should be flushed prior to | ||
396 | * an uncached write such as our mmio register access, the empirical | ||
397 | * evidence (esp. on Braswell) suggests that the WC write into memory | ||
398 | * may not be visible to the HW prior to the completion of the UC | ||
399 | * register write and that we may begin execution from the context | ||
400 | * before its image is complete leading to invalid PD chasing. | ||
401 | */ | ||
402 | wmb(); | ||
435 | return ce->lrc_desc; | 403 | return ce->lrc_desc; |
436 | } | 404 | } |
437 | 405 | ||
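The new wmb() guarantees the write-combined context image is globally visible before the uncached register write that lets the hardware start reading it. A userspace analogue of that ordering, with __sync_synchronize() standing in for wmb() and plain variables standing in for the WC image and the MMIO register:

#include <stdint.h>
#include <stdio.h>

static uint32_t ctx_image[2];		/* stands in for the WC context image */
static volatile uint32_t mmio_elsp;	/* stands in for the UC ELSP register */

int main(void)
{
	ctx_image[0] = 0xdeadbeef;	/* update ring tail, PDPs, ... */

	/*
	 * Full write barrier: the image must be visible before the
	 * "register" write that lets the HW start chasing it.
	 */
	__sync_synchronize();		/* userspace stand-in for wmb() */

	mmio_elsp = 1;			/* submit: HW may now read ctx_image */
	printf("submitted %#x\n", ctx_image[0]);
	return 0;
}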
@@ -669,8 +637,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine) | |||
669 | while ((rb = rb_first_cached(&execlists->queue))) { | 637 | while ((rb = rb_first_cached(&execlists->queue))) { |
670 | struct i915_priolist *p = to_priolist(rb); | 638 | struct i915_priolist *p = to_priolist(rb); |
671 | struct i915_request *rq, *rn; | 639 | struct i915_request *rq, *rn; |
640 | int i; | ||
672 | 641 | ||
673 | list_for_each_entry_safe(rq, rn, &p->requests, sched.link) { | 642 | priolist_for_each_request_consume(rq, rn, p, i) { |
674 | /* | 643 | /* |
675 | * Can we combine this request with the current port? | 644 | * Can we combine this request with the current port? |
676 | * It has to be the same context/ringbuffer and not | 645 | * It has to be the same context/ringbuffer and not |
@@ -689,11 +658,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) | |||
689 | * combine this request with the last, then we | 658 | * combine this request with the last, then we |
690 | * are done. | 659 | * are done. |
691 | */ | 660 | */ |
692 | if (port == last_port) { | 661 | if (port == last_port) |
693 | __list_del_many(&p->requests, | ||
694 | &rq->sched.link); | ||
695 | goto done; | 662 | goto done; |
696 | } | ||
697 | 663 | ||
698 | /* | 664 | /* |
699 | * If GVT overrides us we only ever submit | 665 | * If GVT overrides us we only ever submit |
@@ -703,11 +669,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) | |||
703 | * request) to the second port. | 669 | * request) to the second port. |
704 | */ | 670 | */ |
705 | if (ctx_single_port_submission(last->hw_context) || | 671 | if (ctx_single_port_submission(last->hw_context) || |
706 | ctx_single_port_submission(rq->hw_context)) { | 672 | ctx_single_port_submission(rq->hw_context)) |
707 | __list_del_many(&p->requests, | ||
708 | &rq->sched.link); | ||
709 | goto done; | 673 | goto done; |
710 | } | ||
711 | 674 | ||
712 | GEM_BUG_ON(last->hw_context == rq->hw_context); | 675 | GEM_BUG_ON(last->hw_context == rq->hw_context); |
713 | 676 | ||
@@ -718,15 +681,16 @@ static void execlists_dequeue(struct intel_engine_cs *engine) | |||
718 | GEM_BUG_ON(port_isset(port)); | 681 | GEM_BUG_ON(port_isset(port)); |
719 | } | 682 | } |
720 | 683 | ||
721 | INIT_LIST_HEAD(&rq->sched.link); | 684 | list_del_init(&rq->sched.link); |
685 | |||
722 | __i915_request_submit(rq); | 686 | __i915_request_submit(rq); |
723 | trace_i915_request_in(rq, port_index(port, execlists)); | 687 | trace_i915_request_in(rq, port_index(port, execlists)); |
688 | |||
724 | last = rq; | 689 | last = rq; |
725 | submit = true; | 690 | submit = true; |
726 | } | 691 | } |
727 | 692 | ||
728 | rb_erase_cached(&p->node, &execlists->queue); | 693 | rb_erase_cached(&p->node, &execlists->queue); |
729 | INIT_LIST_HEAD(&p->requests); | ||
730 | if (p->priority != I915_PRIORITY_NORMAL) | 694 | if (p->priority != I915_PRIORITY_NORMAL) |
731 | kmem_cache_free(engine->i915->priorities, p); | 695 | kmem_cache_free(engine->i915->priorities, p); |
732 | } | 696 | } |
@@ -861,16 +825,16 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) | |||
861 | /* Flush the queued requests to the timeline list (for retiring). */ | 825 | /* Flush the queued requests to the timeline list (for retiring). */ |
862 | while ((rb = rb_first_cached(&execlists->queue))) { | 826 | while ((rb = rb_first_cached(&execlists->queue))) { |
863 | struct i915_priolist *p = to_priolist(rb); | 827 | struct i915_priolist *p = to_priolist(rb); |
828 | int i; | ||
864 | 829 | ||
865 | list_for_each_entry_safe(rq, rn, &p->requests, sched.link) { | 830 | priolist_for_each_request_consume(rq, rn, p, i) { |
866 | INIT_LIST_HEAD(&rq->sched.link); | 831 | list_del_init(&rq->sched.link); |
867 | 832 | ||
868 | dma_fence_set_error(&rq->fence, -EIO); | 833 | dma_fence_set_error(&rq->fence, -EIO); |
869 | __i915_request_submit(rq); | 834 | __i915_request_submit(rq); |
870 | } | 835 | } |
871 | 836 | ||
872 | rb_erase_cached(&p->node, &execlists->queue); | 837 | rb_erase_cached(&p->node, &execlists->queue); |
873 | INIT_LIST_HEAD(&p->requests); | ||
874 | if (p->priority != I915_PRIORITY_NORMAL) | 838 | if (p->priority != I915_PRIORITY_NORMAL) |
875 | kmem_cache_free(engine->i915->priorities, p); | 839 | kmem_cache_free(engine->i915->priorities, p); |
876 | } | 840 | } |
@@ -1076,13 +1040,7 @@ static void queue_request(struct intel_engine_cs *engine, | |||
1076 | struct i915_sched_node *node, | 1040 | struct i915_sched_node *node, |
1077 | int prio) | 1041 | int prio) |
1078 | { | 1042 | { |
1079 | list_add_tail(&node->link, | 1043 | list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio)); |
1080 | &lookup_priolist(engine, prio)->requests); | ||
1081 | } | ||
1082 | |||
1083 | static void __update_queue(struct intel_engine_cs *engine, int prio) | ||
1084 | { | ||
1085 | engine->execlists.queue_priority = prio; | ||
1086 | } | 1044 | } |
1087 | 1045 | ||
1088 | static void __submit_queue_imm(struct intel_engine_cs *engine) | 1046 | static void __submit_queue_imm(struct intel_engine_cs *engine) |
@@ -1101,7 +1059,7 @@ static void __submit_queue_imm(struct intel_engine_cs *engine) | |||
1101 | static void submit_queue(struct intel_engine_cs *engine, int prio) | 1059 | static void submit_queue(struct intel_engine_cs *engine, int prio) |
1102 | { | 1060 | { |
1103 | if (prio > engine->execlists.queue_priority) { | 1061 | if (prio > engine->execlists.queue_priority) { |
1104 | __update_queue(engine, prio); | 1062 | engine->execlists.queue_priority = prio; |
1105 | __submit_queue_imm(engine); | 1063 | __submit_queue_imm(engine); |
1106 | } | 1064 | } |
1107 | } | 1065 | } |
@@ -1124,139 +1082,6 @@ static void execlists_submit_request(struct i915_request *request) | |||
1124 | spin_unlock_irqrestore(&engine->timeline.lock, flags); | 1082 | spin_unlock_irqrestore(&engine->timeline.lock, flags); |
1125 | } | 1083 | } |
1126 | 1084 | ||
1127 | static struct i915_request *sched_to_request(struct i915_sched_node *node) | ||
1128 | { | ||
1129 | return container_of(node, struct i915_request, sched); | ||
1130 | } | ||
1131 | |||
1132 | static struct intel_engine_cs * | ||
1133 | sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked) | ||
1134 | { | ||
1135 | struct intel_engine_cs *engine = sched_to_request(node)->engine; | ||
1136 | |||
1137 | GEM_BUG_ON(!locked); | ||
1138 | |||
1139 | if (engine != locked) { | ||
1140 | spin_unlock(&locked->timeline.lock); | ||
1141 | spin_lock(&engine->timeline.lock); | ||
1142 | } | ||
1143 | |||
1144 | return engine; | ||
1145 | } | ||
1146 | |||
1147 | static void execlists_schedule(struct i915_request *request, | ||
1148 | const struct i915_sched_attr *attr) | ||
1149 | { | ||
1150 | struct i915_priolist *uninitialized_var(pl); | ||
1151 | struct intel_engine_cs *engine, *last; | ||
1152 | struct i915_dependency *dep, *p; | ||
1153 | struct i915_dependency stack; | ||
1154 | const int prio = attr->priority; | ||
1155 | LIST_HEAD(dfs); | ||
1156 | |||
1157 | GEM_BUG_ON(prio == I915_PRIORITY_INVALID); | ||
1158 | |||
1159 | if (i915_request_completed(request)) | ||
1160 | return; | ||
1161 | |||
1162 | if (prio <= READ_ONCE(request->sched.attr.priority)) | ||
1163 | return; | ||
1164 | |||
1165 | /* Need BKL in order to use the temporary link inside i915_dependency */ | ||
1166 | lockdep_assert_held(&request->i915->drm.struct_mutex); | ||
1167 | |||
1168 | stack.signaler = &request->sched; | ||
1169 | list_add(&stack.dfs_link, &dfs); | ||
1170 | |||
1171 | /* | ||
1172 | * Recursively bump all dependent priorities to match the new request. | ||
1173 | * | ||
1174 | * A naive approach would be to use recursion: | ||
1175 | * static void update_priorities(struct i915_sched_node *node, prio) { | ||
1176 | * list_for_each_entry(dep, &node->signalers_list, signal_link) | ||
1177 | * update_priorities(dep->signal, prio) | ||
1178 | * queue_request(node); | ||
1179 | * } | ||
1180 | * but that may have unlimited recursion depth and so runs a very | ||
1181 | * real risk of overrunning the kernel stack. Instead, we build | ||
1182 | * a flat list of all dependencies starting with the current request. | ||
1183 | * As we walk the list of dependencies, we add all of its dependencies | ||
1184 | * to the end of the list (this may include an already visited | ||
1185 | * request) and continue to walk onwards onto the new dependencies. The | ||
1186 | * end result is a topological list of requests in reverse order, the | ||
1187 | * last element in the list is the request we must execute first. | ||
1188 | */ | ||
1189 | list_for_each_entry(dep, &dfs, dfs_link) { | ||
1190 | struct i915_sched_node *node = dep->signaler; | ||
1191 | |||
1192 | /* | ||
1193 | * Within an engine, there can be no cycle, but we may | ||
1194 | * refer to the same dependency chain multiple times | ||
1195 | * (redundant dependencies are not eliminated) and across | ||
1196 | * engines. | ||
1197 | */ | ||
1198 | list_for_each_entry(p, &node->signalers_list, signal_link) { | ||
1199 | GEM_BUG_ON(p == dep); /* no cycles! */ | ||
1200 | |||
1201 | if (i915_sched_node_signaled(p->signaler)) | ||
1202 | continue; | ||
1203 | |||
1204 | GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority); | ||
1205 | if (prio > READ_ONCE(p->signaler->attr.priority)) | ||
1206 | list_move_tail(&p->dfs_link, &dfs); | ||
1207 | } | ||
1208 | } | ||
1209 | |||
1210 | /* | ||
1211 | * If we didn't need to bump any existing priorities, and we haven't | ||
1212 | * yet submitted this request (i.e. there is no potential race with | ||
1213 | * execlists_submit_request()), we can set our own priority and skip | ||
1214 | * acquiring the engine locks. | ||
1215 | */ | ||
1216 | if (request->sched.attr.priority == I915_PRIORITY_INVALID) { | ||
1217 | GEM_BUG_ON(!list_empty(&request->sched.link)); | ||
1218 | request->sched.attr = *attr; | ||
1219 | if (stack.dfs_link.next == stack.dfs_link.prev) | ||
1220 | return; | ||
1221 | __list_del_entry(&stack.dfs_link); | ||
1222 | } | ||
1223 | |||
1224 | last = NULL; | ||
1225 | engine = request->engine; | ||
1226 | spin_lock_irq(&engine->timeline.lock); | ||
1227 | |||
1228 | /* Fifo and depth-first replacement ensure our deps execute before us */ | ||
1229 | list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) { | ||
1230 | struct i915_sched_node *node = dep->signaler; | ||
1231 | |||
1232 | INIT_LIST_HEAD(&dep->dfs_link); | ||
1233 | |||
1234 | engine = sched_lock_engine(node, engine); | ||
1235 | |||
1236 | if (prio <= node->attr.priority) | ||
1237 | continue; | ||
1238 | |||
1239 | node->attr.priority = prio; | ||
1240 | if (!list_empty(&node->link)) { | ||
1241 | if (last != engine) { | ||
1242 | pl = lookup_priolist(engine, prio); | ||
1243 | last = engine; | ||
1244 | } | ||
1245 | GEM_BUG_ON(pl->priority != prio); | ||
1246 | list_move_tail(&node->link, &pl->requests); | ||
1247 | } | ||
1248 | |||
1249 | if (prio > engine->execlists.queue_priority && | ||
1250 | i915_sw_fence_done(&sched_to_request(node)->submit)) { | ||
1251 | /* defer submission until after all of our updates */ | ||
1252 | __update_queue(engine, prio); | ||
1253 | tasklet_hi_schedule(&engine->execlists.tasklet); | ||
1254 | } | ||
1255 | } | ||
1256 | |||
1257 | spin_unlock_irq(&engine->timeline.lock); | ||
1258 | } | ||
1259 | |||
1260 | static void execlists_context_destroy(struct intel_context *ce) | 1085 | static void execlists_context_destroy(struct intel_context *ce) |
1261 | { | 1086 | { |
1262 | GEM_BUG_ON(ce->pin_count); | 1087 | GEM_BUG_ON(ce->pin_count); |
@@ -1272,6 +1097,28 @@ static void execlists_context_destroy(struct intel_context *ce) | |||
1272 | 1097 | ||
1273 | static void execlists_context_unpin(struct intel_context *ce) | 1098 | static void execlists_context_unpin(struct intel_context *ce) |
1274 | { | 1099 | { |
1100 | struct intel_engine_cs *engine; | ||
1101 | |||
1102 | /* | ||
1103 | * The tasklet may still be using a pointer to our state, via an | ||
1104 | * old request. However, since we know we only unpin the context | ||
1105 | * on retirement of the following request, we know that the last | ||
1106 | * request referencing us will have had a completion CS interrupt. | ||
1107 | * If we see that it is still active, it means that the tasklet hasn't | ||
1108 | * had the chance to run yet; let it run before we teardown the | ||
1109 | * reference it may use. | ||
1110 | */ | ||
1111 | engine = READ_ONCE(ce->active); | ||
1112 | if (unlikely(engine)) { | ||
1113 | unsigned long flags; | ||
1114 | |||
1115 | spin_lock_irqsave(&engine->timeline.lock, flags); | ||
1116 | process_csb(engine); | ||
1117 | spin_unlock_irqrestore(&engine->timeline.lock, flags); | ||
1118 | |||
1119 | GEM_BUG_ON(READ_ONCE(ce->active)); | ||
1120 | } | ||
1121 | |||
1275 | i915_gem_context_unpin_hw_id(ce->gem_context); | 1122 | i915_gem_context_unpin_hw_id(ce->gem_context); |
1276 | 1123 | ||
1277 | intel_ring_unpin(ce->ring); | 1124 | intel_ring_unpin(ce->ring); |
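Before unpinning, the context now drains any pending CSB processing so no stale engine pointer survives teardown. A toy model of that "check active, drain, assert" sequence; process_csb() and the locking are reduced to trivial stand-ins here.

#include <assert.h>
#include <stdio.h>

struct ctx { void *active; };

static void process_csb(struct ctx *c)
{
	c->active = NULL;	/* drain completions, drop the reference */
}

/*
 * Before tearing down context state, let pending completion
 * processing run so nothing still points at us.
 */
static void unpin(struct ctx *c)
{
	if (c->active) {	/* tasklet hasn't run yet */
		process_csb(c);
		assert(!c->active);
	}
	printf("safe to unpin\n");
}

int main(void)
{
	struct ctx c = { .active = &c };

	unpin(&c);
	return 0;
}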
@@ -1375,6 +1222,7 @@ execlists_context_pin(struct intel_engine_cs *engine, | |||
1375 | struct intel_context *ce = to_intel_context(ctx, engine); | 1222 | struct intel_context *ce = to_intel_context(ctx, engine); |
1376 | 1223 | ||
1377 | lockdep_assert_held(&ctx->i915->drm.struct_mutex); | 1224 | lockdep_assert_held(&ctx->i915->drm.struct_mutex); |
1225 | GEM_BUG_ON(!ctx->ppgtt); | ||
1378 | 1226 | ||
1379 | if (likely(ce->pin_count++)) | 1227 | if (likely(ce->pin_count++)) |
1380 | return ce; | 1228 | return ce; |
@@ -1679,7 +1527,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) | |||
1679 | unsigned int i; | 1527 | unsigned int i; |
1680 | int ret; | 1528 | int ret; |
1681 | 1529 | ||
1682 | if (GEM_WARN_ON(engine->id != RCS)) | 1530 | if (GEM_DEBUG_WARN_ON(engine->id != RCS)) |
1683 | return -EINVAL; | 1531 | return -EINVAL; |
1684 | 1532 | ||
1685 | switch (INTEL_GEN(engine->i915)) { | 1533 | switch (INTEL_GEN(engine->i915)) { |
@@ -1718,8 +1566,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) | |||
1718 | */ | 1566 | */ |
1719 | for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) { | 1567 | for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) { |
1720 | wa_bb[i]->offset = batch_ptr - batch; | 1568 | wa_bb[i]->offset = batch_ptr - batch; |
1721 | if (GEM_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset, | 1569 | if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset, |
1722 | CACHELINE_BYTES))) { | 1570 | CACHELINE_BYTES))) { |
1723 | ret = -EINVAL; | 1571 | ret = -EINVAL; |
1724 | break; | 1572 | break; |
1725 | } | 1573 | } |
@@ -1902,7 +1750,7 @@ static void execlists_reset(struct intel_engine_cs *engine, | |||
1902 | unsigned long flags; | 1750 | unsigned long flags; |
1903 | u32 *regs; | 1751 | u32 *regs; |
1904 | 1752 | ||
1905 | GEM_TRACE("%s request global=%x, current=%d\n", | 1753 | GEM_TRACE("%s request global=%d, current=%d\n", |
1906 | engine->name, request ? request->global_seqno : 0, | 1754 | engine->name, request ? request->global_seqno : 0, |
1907 | intel_engine_get_seqno(engine)); | 1755 | intel_engine_get_seqno(engine)); |
1908 | 1756 | ||
@@ -2029,8 +1877,7 @@ static int gen8_emit_bb_start(struct i915_request *rq, | |||
2029 | * it is unsafe in case of lite-restore (because the ctx is | 1877 | * it is unsafe in case of lite-restore (because the ctx is |
2030 | * not idle). PML4 is allocated during ppgtt init so this is | 1878 | * not idle). PML4 is allocated during ppgtt init so this is |
2031 | * not needed in 48-bit.*/ | 1879 | * not needed in 48-bit.*/ |
2032 | if (rq->gem_context->ppgtt && | 1880 | if ((intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) && |
2033 | (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) && | ||
2034 | !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) && | 1881 | !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) && |
2035 | !intel_vgpu_active(rq->i915)) { | 1882 | !intel_vgpu_active(rq->i915)) { |
2036 | ret = intel_logical_ring_emit_pdps(rq); | 1883 | ret = intel_logical_ring_emit_pdps(rq); |
@@ -2109,7 +1956,7 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode) | |||
2109 | 1956 | ||
2110 | if (mode & EMIT_INVALIDATE) { | 1957 | if (mode & EMIT_INVALIDATE) { |
2111 | cmd |= MI_INVALIDATE_TLB; | 1958 | cmd |= MI_INVALIDATE_TLB; |
2112 | if (request->engine->id == VCS) | 1959 | if (request->engine->class == VIDEO_DECODE_CLASS) |
2113 | cmd |= MI_INVALIDATE_BSD; | 1960 | cmd |= MI_INVALIDATE_BSD; |
2114 | } | 1961 | } |
2115 | 1962 | ||
@@ -2294,7 +2141,7 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine) | |||
2294 | { | 2141 | { |
2295 | engine->submit_request = execlists_submit_request; | 2142 | engine->submit_request = execlists_submit_request; |
2296 | engine->cancel_requests = execlists_cancel_requests; | 2143 | engine->cancel_requests = execlists_cancel_requests; |
2297 | engine->schedule = execlists_schedule; | 2144 | engine->schedule = i915_schedule; |
2298 | engine->execlists.tasklet.func = execlists_submission_tasklet; | 2145 | engine->execlists.tasklet.func = execlists_submission_tasklet; |
2299 | 2146 | ||
2300 | engine->reset.prepare = execlists_reset_prepare; | 2147 | engine->reset.prepare = execlists_reset_prepare; |
@@ -2632,7 +2479,6 @@ static void execlists_init_reg_state(u32 *regs, | |||
2632 | struct intel_ring *ring) | 2479 | struct intel_ring *ring) |
2633 | { | 2480 | { |
2634 | struct drm_i915_private *dev_priv = engine->i915; | 2481 | struct drm_i915_private *dev_priv = engine->i915; |
2635 | struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt; | ||
2636 | u32 base = engine->mmio_base; | 2482 | u32 base = engine->mmio_base; |
2637 | bool rcs = engine->class == RENDER_CLASS; | 2483 | bool rcs = engine->class == RENDER_CLASS; |
2638 | 2484 | ||
@@ -2704,12 +2550,12 @@ static void execlists_init_reg_state(u32 *regs, | |||
2704 | CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0); | 2550 | CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0); |
2705 | CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0); | 2551 | CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0); |
2706 | 2552 | ||
2707 | if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) { | 2553 | if (i915_vm_is_48bit(&ctx->ppgtt->vm)) { |
2708 | /* 64b PPGTT (48bit canonical) | 2554 | /* 64b PPGTT (48bit canonical) |
2709 | * PDP0_DESCRIPTOR contains the base address to PML4 and | 2555 | * PDP0_DESCRIPTOR contains the base address to PML4 and |
2710 | * other PDP Descriptors are ignored. | 2556 | * other PDP Descriptors are ignored. |
2711 | */ | 2557 | */ |
2712 | ASSIGN_CTX_PML4(ppgtt, regs); | 2558 | ASSIGN_CTX_PML4(ctx->ppgtt, regs); |
2713 | } | 2559 | } |
2714 | 2560 | ||
2715 | if (rcs) { | 2561 | if (rcs) { |
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 3e085c5f2b81..96a8d9524b0c 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c | |||
@@ -27,6 +27,22 @@ | |||
27 | #include <drm/drm_dp_dual_mode_helper.h> | 27 | #include <drm/drm_dp_dual_mode_helper.h> |
28 | #include "intel_drv.h" | 28 | #include "intel_drv.h" |
29 | 29 | ||
30 | /* LSPCON OUI Vendor ID(signatures) */ | ||
31 | #define LSPCON_VENDOR_PARADE_OUI 0x001CF8 | ||
32 | #define LSPCON_VENDOR_MCA_OUI 0x0060AD | ||
33 | |||
34 | /* AUX addresses to write MCA AVI IF */ | ||
35 | #define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0 | ||
36 | #define LSPCON_MCA_AVI_IF_CTRL 0x5DF | ||
37 | #define LSPCON_MCA_AVI_IF_KICKOFF (1 << 0) | ||
38 | #define LSPCON_MCA_AVI_IF_HANDLED (1 << 1) | ||
39 | |||
40 | /* AUX addresses to write Parade AVI IF */ | ||
41 | #define LSPCON_PARADE_AVI_IF_WRITE_OFFSET 0x516 | ||
42 | #define LSPCON_PARADE_AVI_IF_CTRL 0x51E | ||
43 | #define LSPCON_PARADE_AVI_IF_KICKOFF (1 << 7) | ||
44 | #define LSPCON_PARADE_AVI_IF_DATA_SIZE 32 | ||
45 | |||
30 | static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon) | 46 | static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon) |
31 | { | 47 | { |
32 | struct intel_digital_port *dig_port = | 48 | struct intel_digital_port *dig_port = |
@@ -50,6 +66,40 @@ static const char *lspcon_mode_name(enum drm_lspcon_mode mode) | |||
50 | } | 66 | } |
51 | } | 67 | } |
52 | 68 | ||
69 | static bool lspcon_detect_vendor(struct intel_lspcon *lspcon) | ||
70 | { | ||
71 | struct intel_dp *dp = lspcon_to_intel_dp(lspcon); | ||
72 | struct drm_dp_dpcd_ident *ident; | ||
73 | u32 vendor_oui; | ||
74 | |||
75 | if (drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd))) { | ||
76 | DRM_ERROR("Can't read description\n"); | ||
77 | return false; | ||
78 | } | ||
79 | |||
80 | ident = &dp->desc.ident; | ||
81 | vendor_oui = (ident->oui[0] << 16) | (ident->oui[1] << 8) | | ||
82 | ident->oui[2]; | ||
83 | |||
84 | switch (vendor_oui) { | ||
85 | case LSPCON_VENDOR_MCA_OUI: | ||
86 | lspcon->vendor = LSPCON_VENDOR_MCA; | ||
87 | DRM_DEBUG_KMS("Vendor: Mega Chips\n"); | ||
88 | break; | ||
89 | |||
90 | case LSPCON_VENDOR_PARADE_OUI: | ||
91 | lspcon->vendor = LSPCON_VENDOR_PARADE; | ||
92 | DRM_DEBUG_KMS("Vendor: Parade Tech\n"); | ||
93 | break; | ||
94 | |||
95 | default: | ||
96 | DRM_ERROR("Invalid/Unknown vendor OUI\n"); | ||
97 | return false; | ||
98 | } | ||
99 | |||
100 | return true; | ||
101 | } | ||
102 | |||
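The vendor detection above assembles the three DPCD ident bytes big-endian, byte 0 into bits 23:16. A standalone sketch of the same packing and lookup, compilable in user space (the sample bytes are made up; the OUI constants are copied from the defines above):

    #include <stdint.h>
    #include <stdio.h>

    #define LSPCON_VENDOR_PARADE_OUI 0x001CF8
    #define LSPCON_VENDOR_MCA_OUI    0x0060AD

    int main(void)
    {
            /* e.g. ident->oui[] as read from the DPCD of an MCA device */
            uint8_t oui[3] = { 0x00, 0x60, 0xAD };
            uint32_t vendor_oui = (oui[0] << 16) | (oui[1] << 8) | oui[2];

            if (vendor_oui == LSPCON_VENDOR_MCA_OUI)
                    printf("Vendor: Mega Chips (0x%06X)\n", (unsigned)vendor_oui);
            else if (vendor_oui == LSPCON_VENDOR_PARADE_OUI)
                    printf("Vendor: Parade Tech (0x%06X)\n", (unsigned)vendor_oui);
            else
                    printf("Invalid/unknown vendor OUI 0x%06X\n", (unsigned)vendor_oui);
            return 0;
    }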
53 | static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon) | 103 | static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon) |
54 | { | 104 | { |
55 | enum drm_lspcon_mode current_mode; | 105 | enum drm_lspcon_mode current_mode; |
@@ -130,6 +180,21 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon) | |||
130 | return true; | 180 | return true; |
131 | } | 181 | } |
132 | 182 | ||
183 | void lspcon_ycbcr420_config(struct drm_connector *connector, | ||
184 | struct intel_crtc_state *crtc_state) | ||
185 | { | ||
186 | const struct drm_display_info *info = &connector->display_info; | ||
187 | const struct drm_display_mode *adjusted_mode = | ||
188 | &crtc_state->base.adjusted_mode; | ||
189 | |||
190 | if (drm_mode_is_420_only(info, adjusted_mode) && | ||
191 | connector->ycbcr_420_allowed) { | ||
192 | crtc_state->port_clock /= 2; | ||
193 | crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; | ||
194 | crtc_state->lspcon_downsampling = true; | ||
195 | } | ||
196 | } | ||
197 | |||
133 | static bool lspcon_probe(struct intel_lspcon *lspcon) | 198 | static bool lspcon_probe(struct intel_lspcon *lspcon) |
134 | { | 199 | { |
135 | int retry; | 200 | int retry; |
@@ -159,7 +224,18 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) | |||
159 | /* Yay ... got a LSPCON device */ | 224 | /* Yay ... got a LSPCON device */ |
160 | DRM_DEBUG_KMS("LSPCON detected\n"); | 225 | DRM_DEBUG_KMS("LSPCON detected\n"); |
161 | lspcon->mode = lspcon_wait_mode(lspcon, expected_mode); | 226 | lspcon->mode = lspcon_wait_mode(lspcon, expected_mode); |
162 | lspcon->active = true; | 227 | |
228 | /* | ||
229 | * In the SW state machine, let's put LSPCON in PCON mode only. |||
230 | * That way it will work with both HDMI 1.4 sinks as well as |||
231 | * HDMI 2.0 sinks. |||
232 | */ | ||
233 | if (lspcon->mode != DRM_LSPCON_MODE_PCON) { | ||
234 | if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) { | ||
235 | DRM_ERROR("LSPCON mode change to PCON failed\n"); | ||
236 | return false; | ||
237 | } | ||
238 | } | ||
163 | return true; | 239 | return true; |
164 | } | 240 | } |
165 | 241 | ||
@@ -185,6 +261,255 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) | |||
185 | DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n"); | 261 | DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n"); |
186 | } | 262 | } |
187 | 263 | ||
264 | static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux) | ||
265 | { | ||
266 | u8 avi_if_ctrl; | ||
267 | u8 retry; | ||
268 | ssize_t ret; | ||
269 | |||
270 | /* Check if LSPCON FW is ready for data */ | ||
271 | for (retry = 0; retry < 5; retry++) { | ||
272 | if (retry) | ||
273 | usleep_range(200, 300); | ||
274 | |||
275 | ret = drm_dp_dpcd_read(aux, LSPCON_PARADE_AVI_IF_CTRL, | ||
276 | &avi_if_ctrl, 1); | ||
277 | if (ret < 0) { | ||
278 | DRM_ERROR("Failed to read AVI IF control\n"); | ||
279 | return false; | ||
280 | } | ||
281 | |||
282 | if ((avi_if_ctrl & LSPCON_PARADE_AVI_IF_KICKOFF) == 0) | ||
283 | return true; | ||
284 | } | ||
285 | |||
286 | DRM_ERROR("Parade FW not ready to accept AVI IF\n"); | ||
287 | return false; | ||
288 | } | ||
289 | |||
290 | static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux, | ||
291 | uint8_t *avi_buf) | ||
292 | { | ||
293 | u8 avi_if_ctrl; | ||
294 | u8 block_count = 0; | ||
295 | u8 *data; | ||
296 | uint16_t reg; | ||
297 | ssize_t ret; | ||
298 | |||
299 | while (block_count < 4) { | ||
300 | if (!lspcon_parade_fw_ready(aux)) { | ||
301 | DRM_DEBUG_KMS("LSPCON FW not ready, block %d\n", | ||
302 | block_count); | ||
303 | return false; | ||
304 | } | ||
305 | |||
306 | reg = LSPCON_PARADE_AVI_IF_WRITE_OFFSET; | ||
307 | data = avi_buf + block_count * 8; | ||
308 | ret = drm_dp_dpcd_write(aux, reg, data, 8); | ||
309 | if (ret < 0) { | ||
310 | DRM_ERROR("Failed to write AVI IF block %d\n", | ||
311 | block_count); | ||
312 | return false; | ||
313 | } | ||
314 | |||
315 | /* | ||
316 | * Once a block of data is written, we have to inform the FW | ||
317 | * about this by writing into the AVI infoframe control register: |||
318 | * - set the kickoff bit[7] to 1 | ||
319 | * - write the block no. to bits[1:0] | ||
320 | */ | ||
321 | reg = LSPCON_PARADE_AVI_IF_CTRL; | ||
322 | avi_if_ctrl = LSPCON_PARADE_AVI_IF_KICKOFF | block_count; | ||
323 | ret = drm_dp_dpcd_write(aux, reg, &avi_if_ctrl, 1); | ||
324 | if (ret < 0) { | ||
325 | DRM_ERROR("Failed to update (0x%x), block %d\n", | ||
326 | reg, block_count); | ||
327 | return false; | ||
328 | } | ||
329 | |||
330 | block_count++; | ||
331 | } | ||
332 | |||
333 | DRM_DEBUG_KMS("Wrote AVI IF blocks successfully\n"); | ||
334 | return true; | ||
335 | } | ||
336 | |||
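The control byte written after each Parade block is just the kickoff bit ORed with the block index, so the four writes are 0x80 through 0x83. A standalone sketch of that arithmetic (constant copied from the defines above; the loop stands in for the four AUX writes):

    #include <stdint.h>
    #include <stdio.h>

    #define LSPCON_PARADE_AVI_IF_KICKOFF (1 << 7)

    int main(void)
    {
            for (unsigned block = 0; block < 4; block++) {
                    /* FW clears bit 7 again once it has consumed the block */
                    uint8_t avi_if_ctrl = LSPCON_PARADE_AVI_IF_KICKOFF | block;

                    printf("block %u -> ctrl 0x%02X\n", block, (unsigned)avi_if_ctrl);
            }
            return 0;
    }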
337 | static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux, | ||
338 | const uint8_t *frame, | ||
339 | ssize_t len) | ||
340 | { | ||
341 | uint8_t avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, }; | ||
342 | |||
343 | /* |||
344 | * Parade's frame contains 32 bytes of data, divided |||
345 | * into 4 blocks: |||
346 | * Token byte (first byte of first block, must be non-zero) |||
347 | * HB0 to HB2 from AVI IF (3 bytes header) |||
348 | * PB0 to PB27 from AVI IF (28 bytes data) |||
349 | * So it should look like this: |||
350 | * first block: | <token> <HB0-HB2> <PB0-PB3> | |||
351 | * next 3 blocks: |<PB4-PB11>|<PB12-PB19>|<PB20-PB27>| |||
352 | */ |||
353 | |||
354 | if (len > LSPCON_PARADE_AVI_IF_DATA_SIZE - 1) { | ||
355 | DRM_ERROR("Invalid length of infoframes\n"); | ||
356 | return false; | ||
357 | } | ||
358 | |||
359 | memcpy(&avi_if[1], frame, len); | ||
360 | |||
361 | if (!_lspcon_parade_write_infoframe_blocks(aux, avi_if)) { | ||
362 | DRM_DEBUG_KMS("Failed to write infoframe blocks\n"); | ||
363 | return false; | ||
364 | } | ||
365 | |||
366 | return true; | ||
367 | } | ||
368 | |||
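The 32-byte layout from the comment above can be exercised in isolation: a non-zero token byte, then the packed infoframe (3 header plus up to 28 payload bytes), sliced into four 8-byte AUX writes. A user-space sketch with a dummy frame (the buffer size constant is copied from the defines above):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define LSPCON_PARADE_AVI_IF_DATA_SIZE 32

    int main(void)
    {
            uint8_t frame[31]; /* HB0-HB2 + PB0-PB27 of the packed AVI IF */
            uint8_t avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = { 1, }; /* non-zero token */

            memset(frame, 0xAB, sizeof(frame)); /* dummy packed infoframe */
            memcpy(&avi_if[1], frame, sizeof(frame));

            for (int block = 0; block < 4; block++) {
                    const uint8_t *data = avi_if + block * 8;

                    /* each iteration models one 8-byte AUX write */
                    printf("block %d starts: %02X %02X\n", block,
                           (unsigned)data[0], (unsigned)data[1]);
            }
            return 0;
    }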
369 | static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux, | ||
370 | const uint8_t *buffer, ssize_t len) | ||
371 | { | ||
372 | int ret; | ||
373 | uint32_t val = 0; | ||
374 | uint32_t retry; | ||
375 | uint16_t reg; | ||
376 | const uint8_t *data = buffer; | ||
377 | |||
378 | reg = LSPCON_MCA_AVI_IF_WRITE_OFFSET; | ||
379 | while (val < len) { | ||
380 | /* DPCD write for AVI IF can fail on a slow FW day, so retry */ | ||
381 | for (retry = 0; retry < 5; retry++) { | ||
382 | ret = drm_dp_dpcd_write(aux, reg, (void *)data, 1); | ||
383 | if (ret == 1) { | ||
384 | break; | ||
385 | } else if (retry < 4) { | ||
386 | mdelay(50); | ||
387 | continue; | ||
388 | } else { | ||
389 | DRM_ERROR("DPCD write failed at:0x%x\n", reg); | ||
390 | return false; | ||
391 | } | ||
392 | } | ||
393 | val++; reg++; data++; | ||
394 | } | ||
395 | |||
396 | val = 0; | ||
397 | reg = LSPCON_MCA_AVI_IF_CTRL; | ||
398 | ret = drm_dp_dpcd_read(aux, reg, &val, 1); | ||
399 | if (ret < 0) { | ||
400 | DRM_ERROR("DPCD read failed, address 0x%x\n", reg); | ||
401 | return false; | ||
402 | } | ||
403 | |||
404 | /* Tell the LSPCON chip about the infoframe: clear bit 1, set bit 0 */ |||
405 | val &= ~LSPCON_MCA_AVI_IF_HANDLED; | ||
406 | val |= LSPCON_MCA_AVI_IF_KICKOFF; | ||
407 | |||
408 | ret = drm_dp_dpcd_write(aux, reg, &val, 1); | ||
409 | if (ret < 0) { | ||
410 | DRM_ERROR("DPCD read failed, address 0x%x\n", reg); | ||
411 | return false; | ||
412 | } | ||
413 | |||
414 | val = 0; | ||
415 | ret = drm_dp_dpcd_read(aux, reg, &val, 1); | ||
416 | if (ret < 0) { | ||
417 | DRM_ERROR("DPCD read failed, address 0x%x\n", reg); | ||
418 | return false; | ||
419 | } | ||
420 | |||
421 | if (val == LSPCON_MCA_AVI_IF_HANDLED) | ||
422 | DRM_DEBUG_KMS("AVI IF handled by FW\n"); | ||
423 | |||
424 | return true; | ||
425 | } | ||
426 | |||
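The MCA handshake lives in two bits of one control register: the driver clears HANDLED and sets KICKOFF, and the firmware is expected to flip them back once the frame is consumed. A toy model of the exchange (the firmware side is simulated; constants copied from the defines above):

    #include <stdint.h>
    #include <stdio.h>

    #define LSPCON_MCA_AVI_IF_KICKOFF (1 << 0)
    #define LSPCON_MCA_AVI_IF_HANDLED (1 << 1)

    int main(void)
    {
            uint8_t val = LSPCON_MCA_AVI_IF_HANDLED; /* leftover from the last frame */

            /* driver: request processing of the freshly written infoframe */
            val &= ~LSPCON_MCA_AVI_IF_HANDLED;
            val |= LSPCON_MCA_AVI_IF_KICKOFF;
            printf("ctrl written: 0x%02X\n", (unsigned)val); /* 0x01 */

            /* firmware (simulated): acknowledge by swapping the bits back */
            val = LSPCON_MCA_AVI_IF_HANDLED;
            if (val == LSPCON_MCA_AVI_IF_HANDLED)
                    printf("AVI IF handled by FW\n");
            return 0;
    }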
427 | void lspcon_write_infoframe(struct intel_encoder *encoder, | ||
428 | const struct intel_crtc_state *crtc_state, | ||
429 | unsigned int type, | ||
430 | const void *frame, ssize_t len) | ||
431 | { | ||
432 | bool ret; | ||
433 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | ||
434 | struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base); | ||
435 | |||
436 | /* LSPCON only needs AVI IF */ | ||
437 | if (type != HDMI_INFOFRAME_TYPE_AVI) | ||
438 | return; | ||
439 | |||
440 | if (lspcon->vendor == LSPCON_VENDOR_MCA) | ||
441 | ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux, | ||
442 | frame, len); | ||
443 | else | ||
444 | ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux, | ||
445 | frame, len); | ||
446 | |||
447 | if (!ret) { | ||
448 | DRM_ERROR("Failed to write AVI infoframes\n"); | ||
449 | return; | ||
450 | } | ||
451 | |||
452 | DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n"); | ||
453 | } | ||
454 | |||
455 | void lspcon_set_infoframes(struct intel_encoder *encoder, | ||
456 | bool enable, | ||
457 | const struct intel_crtc_state *crtc_state, | ||
458 | const struct drm_connector_state *conn_state) | ||
459 | { | ||
460 | ssize_t ret; | ||
461 | union hdmi_infoframe frame; | ||
462 | uint8_t buf[VIDEO_DIP_DATA_SIZE]; | ||
463 | struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); | ||
464 | struct intel_lspcon *lspcon = &dig_port->lspcon; | ||
465 | struct intel_dp *intel_dp = &dig_port->dp; | ||
466 | struct drm_connector *connector = &intel_dp->attached_connector->base; | ||
467 | const struct drm_display_mode *mode = &crtc_state->base.adjusted_mode; | ||
468 | bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported; | ||
469 | |||
470 | if (!lspcon->active) { | ||
471 | DRM_ERROR("Writing infoframes while LSPCON disabled ?\n"); | ||
472 | return; | ||
473 | } | ||
474 | |||
475 | ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, | ||
476 | mode, is_hdmi2_sink); | ||
477 | if (ret < 0) { | ||
478 | DRM_ERROR("couldn't fill AVI infoframe\n"); | ||
479 | return; | ||
480 | } | ||
481 | |||
482 | if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) { | ||
483 | if (crtc_state->lspcon_downsampling) | ||
484 | frame.avi.colorspace = HDMI_COLORSPACE_YUV420; | ||
485 | else | ||
486 | frame.avi.colorspace = HDMI_COLORSPACE_YUV444; | ||
487 | } else { | ||
488 | frame.avi.colorspace = HDMI_COLORSPACE_RGB; | ||
489 | } | ||
490 | |||
491 | drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode, | ||
492 | crtc_state->limited_color_range ? | ||
493 | HDMI_QUANTIZATION_RANGE_LIMITED : | ||
494 | HDMI_QUANTIZATION_RANGE_FULL, | ||
495 | false, is_hdmi2_sink); | ||
496 | |||
497 | ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf)); | ||
498 | if (ret < 0) { | ||
499 | DRM_ERROR("Failed to pack AVI IF\n"); | ||
500 | return; | ||
501 | } | ||
502 | |||
503 | dig_port->write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI, | ||
504 | buf, ret); | ||
505 | } | ||
506 | |||
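The AVI colorspace above is a three-way mapping: 4:4:4 output that the LSPCON will downsample is signalled as YUV420 (what actually reaches the HDMI sink), plain 4:4:4 as YUV444, everything else as RGB. A table-style sketch of the same decision (the enums are illustrative stand-ins, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    /* illustrative stand-ins for INTEL_OUTPUT_FORMAT_* / HDMI_COLORSPACE_* */
    enum output_format { FORMAT_RGB, FORMAT_YCBCR444 };
    enum colorspace { CS_RGB, CS_YUV444, CS_YUV420 };

    static enum colorspace avi_colorspace(enum output_format fmt, bool downsampled)
    {
            if (fmt == FORMAT_YCBCR444)
                    return downsampled ? CS_YUV420 : CS_YUV444;
            return CS_RGB;
    }

    int main(void)
    {
            printf("downsampled 4:4:4 -> %d (YUV420)\n",
                   avi_colorspace(FORMAT_YCBCR444, true));
            printf("plain 4:4:4       -> %d (YUV444)\n",
                   avi_colorspace(FORMAT_YCBCR444, false));
            printf("RGB               -> %d (RGB)\n",
                   avi_colorspace(FORMAT_RGB, false));
            return 0;
    }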
507 | bool lspcon_infoframe_enabled(struct intel_encoder *encoder, | ||
508 | const struct intel_crtc_state *pipe_config) | ||
509 | { | ||
510 | return enc_to_intel_lspcon(&encoder->base)->active; | ||
511 | } | ||
512 | |||
188 | void lspcon_resume(struct intel_lspcon *lspcon) | 513 | void lspcon_resume(struct intel_lspcon *lspcon) |
189 | { | 514 | { |
190 | enum drm_lspcon_mode expected_mode; | 515 | enum drm_lspcon_mode expected_mode; |
@@ -216,6 +541,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port) | |||
216 | struct intel_lspcon *lspcon = &intel_dig_port->lspcon; | 541 | struct intel_lspcon *lspcon = &intel_dig_port->lspcon; |
217 | struct drm_device *dev = intel_dig_port->base.base.dev; | 542 | struct drm_device *dev = intel_dig_port->base.base.dev; |
218 | struct drm_i915_private *dev_priv = to_i915(dev); | 543 | struct drm_i915_private *dev_priv = to_i915(dev); |
544 | struct drm_connector *connector = &dp->attached_connector->base; | ||
219 | 545 | ||
220 | if (!HAS_LSPCON(dev_priv)) { | 546 | if (!HAS_LSPCON(dev_priv)) { |
221 | DRM_ERROR("LSPCON is not supported on this platform\n"); | 547 | DRM_ERROR("LSPCON is not supported on this platform\n"); |
@@ -230,25 +556,18 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port) | |||
230 | return false; | 556 | return false; |
231 | } | 557 | } |
232 | 558 | ||
233 | /* | ||
234 | * In the SW state machine, lets Put LSPCON in PCON mode only. | ||
235 | * In this way, it will work with both HDMI 1.4 sinks as well as HDMI | ||
236 | * 2.0 sinks. | ||
237 | */ | ||
238 | if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) { | ||
239 | if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) { | ||
240 | DRM_ERROR("LSPCON mode change to PCON failed\n"); | ||
241 | return false; | ||
242 | } | ||
243 | } | ||
244 | |||
245 | if (!intel_dp_read_dpcd(dp)) { | 559 | if (!intel_dp_read_dpcd(dp)) { |
246 | DRM_ERROR("LSPCON DPCD read failed\n"); | 560 | DRM_ERROR("LSPCON DPCD read failed\n"); |
247 | return false; | 561 | return false; |
248 | } | 562 | } |
249 | 563 | ||
250 | drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd)); | 564 | if (!lspcon_detect_vendor(lspcon)) { |
565 | DRM_ERROR("LSPCON vendor detection failed\n"); | ||
566 | return false; | ||
567 | } | ||
251 | 568 | ||
569 | connector->ycbcr_420_allowed = true; | ||
570 | lspcon->active = true; | ||
252 | DRM_DEBUG_KMS("Success: LSPCON init\n"); | 571 | DRM_DEBUG_KMS("Success: LSPCON init\n"); |
253 | return true; | 572 | return true; |
254 | } | 573 | } |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index f9f3b0885ba5..e6c5d985ea0a 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -42,10 +42,6 @@ | |||
42 | #include <linux/acpi.h> | 42 | #include <linux/acpi.h> |
43 | 43 | ||
44 | /* Private structure for the integrated LVDS support */ | 44 | /* Private structure for the integrated LVDS support */ |
45 | struct intel_lvds_connector { | ||
46 | struct intel_connector base; | ||
47 | }; | ||
48 | |||
49 | struct intel_lvds_pps { | 45 | struct intel_lvds_pps { |
50 | /* 100us units */ | 46 | /* 100us units */ |
51 | int t1_t2; | 47 | int t1_t2; |
@@ -70,7 +66,7 @@ struct intel_lvds_encoder { | |||
70 | struct intel_lvds_pps init_pps; | 66 | struct intel_lvds_pps init_pps; |
71 | u32 init_lvds_val; | 67 | u32 init_lvds_val; |
72 | 68 | ||
73 | struct intel_lvds_connector *attached_connector; | 69 | struct intel_connector *attached_connector; |
74 | }; | 70 | }; |
75 | 71 | ||
76 | static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder) | 72 | static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder) |
@@ -78,11 +74,6 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder) | |||
78 | return container_of(encoder, struct intel_lvds_encoder, base.base); | 74 | return container_of(encoder, struct intel_lvds_encoder, base.base); |
79 | } | 75 | } |
80 | 76 | ||
81 | static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector) | ||
82 | { | ||
83 | return container_of(connector, struct intel_lvds_connector, base.base); | ||
84 | } | ||
85 | |||
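The deleted wrapper is the classic one-member container_of pattern; with no fields beyond base it buys nothing over using struct intel_connector directly, which is what the rest of this patch switches to. A standalone illustration of when such a wrapper does earn its keep, i.e. once it carries extra state (type names are made up):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct base_connector { int id; };

    /* worthwhile only because it adds a field beyond 'base' */
    struct wrapped_connector {
            struct base_connector base;
            int extra_state;
    };

    int main(void)
    {
            struct wrapped_connector w = { .base = { .id = 7 }, .extra_state = 42 };
            struct base_connector *b = &w.base; /* what callbacks hand us */
            struct wrapped_connector *back =
                    container_of(b, struct wrapped_connector, base);

            printf("id=%d extra=%d\n", back->base.id, back->extra_state);
            return 0;
    }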
86 | bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, | 77 | bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, |
87 | i915_reg_t lvds_reg, enum pipe *pipe) | 78 | i915_reg_t lvds_reg, enum pipe *pipe) |
88 | { | 79 | { |
@@ -396,7 +387,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, | |||
396 | struct intel_lvds_encoder *lvds_encoder = | 387 | struct intel_lvds_encoder *lvds_encoder = |
397 | to_lvds_encoder(&intel_encoder->base); | 388 | to_lvds_encoder(&intel_encoder->base); |
398 | struct intel_connector *intel_connector = | 389 | struct intel_connector *intel_connector = |
399 | &lvds_encoder->attached_connector->base; | 390 | lvds_encoder->attached_connector; |
400 | struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; | 391 | struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; |
401 | struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); | 392 | struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); |
402 | unsigned int lvds_bpp; | 393 | unsigned int lvds_bpp; |
@@ -418,6 +409,8 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, | |||
418 | pipe_config->pipe_bpp = lvds_bpp; | 409 | pipe_config->pipe_bpp = lvds_bpp; |
419 | } | 410 | } |
420 | 411 | ||
412 | pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; | ||
413 | |||
421 | /* | 414 | /* |
422 | * We have timings from the BIOS for the panel, put them in | 415 | * We have timings from the BIOS for the panel, put them in |
423 | * to the adjusted mode. The CRTC will be set up for this mode, | 416 | * to the adjusted mode. The CRTC will be set up for this mode, |
@@ -461,15 +454,15 @@ intel_lvds_detect(struct drm_connector *connector, bool force) | |||
461 | */ | 454 | */ |
462 | static int intel_lvds_get_modes(struct drm_connector *connector) | 455 | static int intel_lvds_get_modes(struct drm_connector *connector) |
463 | { | 456 | { |
464 | struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector); | 457 | struct intel_connector *intel_connector = to_intel_connector(connector); |
465 | struct drm_device *dev = connector->dev; | 458 | struct drm_device *dev = connector->dev; |
466 | struct drm_display_mode *mode; | 459 | struct drm_display_mode *mode; |
467 | 460 | ||
468 | /* use cached edid if we have one */ | 461 | /* use cached edid if we have one */ |
469 | if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) | 462 | if (!IS_ERR_OR_NULL(intel_connector->edid)) |
470 | return drm_add_edid_modes(connector, lvds_connector->base.edid); | 463 | return drm_add_edid_modes(connector, intel_connector->edid); |
471 | 464 | ||
472 | mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode); | 465 | mode = drm_mode_duplicate(dev, intel_connector->panel.fixed_mode); |
473 | if (mode == NULL) | 466 | if (mode == NULL) |
474 | return 0; | 467 | return 0; |
475 | 468 | ||
@@ -477,27 +470,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector) | |||
477 | return 1; | 470 | return 1; |
478 | } | 471 | } |
479 | 472 | ||
480 | /** | ||
481 | * intel_lvds_destroy - unregister and free LVDS structures | ||
482 | * @connector: connector to free | ||
483 | * | ||
484 | * Unregister the DDC bus for this connector then free the driver private | ||
485 | * structure. | ||
486 | */ | ||
487 | static void intel_lvds_destroy(struct drm_connector *connector) | ||
488 | { | ||
489 | struct intel_lvds_connector *lvds_connector = | ||
490 | to_lvds_connector(connector); | ||
491 | |||
492 | if (!IS_ERR_OR_NULL(lvds_connector->base.edid)) | ||
493 | kfree(lvds_connector->base.edid); | ||
494 | |||
495 | intel_panel_fini(&lvds_connector->base.panel); | ||
496 | |||
497 | drm_connector_cleanup(connector); | ||
498 | kfree(connector); | ||
499 | } | ||
500 | |||
501 | static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { | 473 | static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { |
502 | .get_modes = intel_lvds_get_modes, | 474 | .get_modes = intel_lvds_get_modes, |
503 | .mode_valid = intel_lvds_mode_valid, | 475 | .mode_valid = intel_lvds_mode_valid, |
@@ -511,7 +483,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { | |||
511 | .atomic_set_property = intel_digital_connector_atomic_set_property, | 483 | .atomic_set_property = intel_digital_connector_atomic_set_property, |
512 | .late_register = intel_connector_register, | 484 | .late_register = intel_connector_register, |
513 | .early_unregister = intel_connector_unregister, | 485 | .early_unregister = intel_connector_unregister, |
514 | .destroy = intel_lvds_destroy, | 486 | .destroy = intel_connector_destroy, |
515 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | 487 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, |
516 | .atomic_duplicate_state = intel_digital_connector_duplicate_state, | 488 | .atomic_duplicate_state = intel_digital_connector_duplicate_state, |
517 | }; | 489 | }; |
@@ -802,8 +774,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) | |||
802 | return i915_modparams.lvds_channel_mode == 2; | 774 | return i915_modparams.lvds_channel_mode == 2; |
803 | 775 | ||
804 | /* single channel LVDS is limited to 112 MHz */ | 776 | /* single channel LVDS is limited to 112 MHz */ |
805 | if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock | 777 | if (lvds_encoder->attached_connector->panel.fixed_mode->clock > 112999) |
806 | > 112999) | ||
807 | return true; | 778 | return true; |
808 | 779 | ||
809 | if (dmi_check_system(intel_dual_link_lvds)) | 780 | if (dmi_check_system(intel_dual_link_lvds)) |
@@ -858,7 +829,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) | |||
858 | struct drm_device *dev = &dev_priv->drm; | 829 | struct drm_device *dev = &dev_priv->drm; |
859 | struct intel_lvds_encoder *lvds_encoder; | 830 | struct intel_lvds_encoder *lvds_encoder; |
860 | struct intel_encoder *intel_encoder; | 831 | struct intel_encoder *intel_encoder; |
861 | struct intel_lvds_connector *lvds_connector; | ||
862 | struct intel_connector *intel_connector; | 832 | struct intel_connector *intel_connector; |
863 | struct drm_connector *connector; | 833 | struct drm_connector *connector; |
864 | struct drm_encoder *encoder; | 834 | struct drm_encoder *encoder; |
@@ -911,23 +881,16 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) | |||
911 | if (!lvds_encoder) | 881 | if (!lvds_encoder) |
912 | return; | 882 | return; |
913 | 883 | ||
914 | lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL); | 884 | intel_connector = intel_connector_alloc(); |
915 | if (!lvds_connector) { | 885 | if (!intel_connector) { |
916 | kfree(lvds_encoder); | ||
917 | return; | ||
918 | } | ||
919 | |||
920 | if (intel_connector_init(&lvds_connector->base) < 0) { | ||
921 | kfree(lvds_connector); | ||
922 | kfree(lvds_encoder); | 886 | kfree(lvds_encoder); |
923 | return; | 887 | return; |
924 | } | 888 | } |
925 | 889 | ||
926 | lvds_encoder->attached_connector = lvds_connector; | 890 | lvds_encoder->attached_connector = intel_connector; |
927 | 891 | ||
928 | intel_encoder = &lvds_encoder->base; | 892 | intel_encoder = &lvds_encoder->base; |
929 | encoder = &intel_encoder->base; | 893 | encoder = &intel_encoder->base; |
930 | intel_connector = &lvds_connector->base; | ||
931 | connector = &intel_connector->base; | 894 | connector = &intel_connector->base; |
932 | drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, | 895 | drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, |
933 | DRM_MODE_CONNECTOR_LVDS); | 896 | DRM_MODE_CONNECTOR_LVDS); |
@@ -1008,7 +971,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) | |||
1008 | } else { | 971 | } else { |
1009 | edid = ERR_PTR(-ENOENT); | 972 | edid = ERR_PTR(-ENOENT); |
1010 | } | 973 | } |
1011 | lvds_connector->base.edid = edid; | 974 | intel_connector->edid = edid; |
1012 | 975 | ||
1013 | list_for_each_entry(scan, &connector->probed_modes, head) { | 976 | list_for_each_entry(scan, &connector->probed_modes, head) { |
1014 | if (scan->type & DRM_MODE_TYPE_PREFERRED) { | 977 | if (scan->type & DRM_MODE_TYPE_PREFERRED) { |
@@ -1072,6 +1035,6 @@ failed: | |||
1072 | drm_connector_cleanup(connector); | 1035 | drm_connector_cleanup(connector); |
1073 | drm_encoder_cleanup(encoder); | 1036 | drm_encoder_cleanup(encoder); |
1074 | kfree(lvds_encoder); | 1037 | kfree(lvds_encoder); |
1075 | kfree(lvds_connector); | 1038 | intel_connector_free(intel_connector); |
1076 | return; | 1039 | return; |
1077 | } | 1040 | } |
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index e034b4166d32..b8f106d9ecf8 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -773,70 +773,6 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv) | |||
773 | opregion->acpi->cadl[i] = 0; | 773 | opregion->acpi->cadl[i] = 0; |
774 | } | 774 | } |
775 | 775 | ||
776 | void intel_opregion_register(struct drm_i915_private *dev_priv) | ||
777 | { | ||
778 | struct intel_opregion *opregion = &dev_priv->opregion; | ||
779 | |||
780 | if (!opregion->header) | ||
781 | return; | ||
782 | |||
783 | if (opregion->acpi) { | ||
784 | intel_didl_outputs(dev_priv); | ||
785 | intel_setup_cadls(dev_priv); | ||
786 | |||
787 | /* Notify BIOS we are ready to handle ACPI video ext notifs. | ||
788 | * Right now, all the events are handled by the ACPI video module. | ||
789 | * We don't actually need to do anything with them. */ | ||
790 | opregion->acpi->csts = 0; | ||
791 | opregion->acpi->drdy = 1; | ||
792 | |||
793 | opregion->acpi_notifier.notifier_call = intel_opregion_video_event; | ||
794 | register_acpi_notifier(&opregion->acpi_notifier); | ||
795 | } | ||
796 | |||
797 | if (opregion->asle) { | ||
798 | opregion->asle->tche = ASLE_TCHE_BLC_EN; | ||
799 | opregion->asle->ardy = ASLE_ARDY_READY; | ||
800 | } | ||
801 | } | ||
802 | |||
803 | void intel_opregion_unregister(struct drm_i915_private *dev_priv) | ||
804 | { | ||
805 | struct intel_opregion *opregion = &dev_priv->opregion; | ||
806 | |||
807 | if (!opregion->header) | ||
808 | return; | ||
809 | |||
810 | if (opregion->asle) | ||
811 | opregion->asle->ardy = ASLE_ARDY_NOT_READY; | ||
812 | |||
813 | cancel_work_sync(&dev_priv->opregion.asle_work); | ||
814 | |||
815 | if (opregion->acpi) { | ||
816 | opregion->acpi->drdy = 0; | ||
817 | |||
818 | unregister_acpi_notifier(&opregion->acpi_notifier); | ||
819 | opregion->acpi_notifier.notifier_call = NULL; | ||
820 | } | ||
821 | |||
822 | /* just clear all opregion memory pointers now */ | ||
823 | memunmap(opregion->header); | ||
824 | if (opregion->rvda) { | ||
825 | memunmap(opregion->rvda); | ||
826 | opregion->rvda = NULL; | ||
827 | } | ||
828 | if (opregion->vbt_firmware) { | ||
829 | kfree(opregion->vbt_firmware); | ||
830 | opregion->vbt_firmware = NULL; | ||
831 | } | ||
832 | opregion->header = NULL; | ||
833 | opregion->acpi = NULL; | ||
834 | opregion->swsci = NULL; | ||
835 | opregion->asle = NULL; | ||
836 | opregion->vbt = NULL; | ||
837 | opregion->lid_state = NULL; | ||
838 | } | ||
839 | |||
840 | static void swsci_setup(struct drm_i915_private *dev_priv) | 776 | static void swsci_setup(struct drm_i915_private *dev_priv) |
841 | { | 777 | { |
842 | struct intel_opregion *opregion = &dev_priv->opregion; | 778 | struct intel_opregion *opregion = &dev_priv->opregion; |
@@ -1115,3 +1051,97 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv) | |||
1115 | 1051 | ||
1116 | return ret - 1; | 1052 | return ret - 1; |
1117 | } | 1053 | } |
1054 | |||
1055 | void intel_opregion_register(struct drm_i915_private *i915) | ||
1056 | { | ||
1057 | struct intel_opregion *opregion = &i915->opregion; | ||
1058 | |||
1059 | if (!opregion->header) | ||
1060 | return; | ||
1061 | |||
1062 | if (opregion->acpi) { | ||
1063 | opregion->acpi_notifier.notifier_call = | ||
1064 | intel_opregion_video_event; | ||
1065 | register_acpi_notifier(&opregion->acpi_notifier); | ||
1066 | } | ||
1067 | |||
1068 | intel_opregion_resume(i915); | ||
1069 | } | ||
1070 | |||
1071 | void intel_opregion_resume(struct drm_i915_private *i915) | ||
1072 | { | ||
1073 | struct intel_opregion *opregion = &i915->opregion; | ||
1074 | |||
1075 | if (!opregion->header) | ||
1076 | return; | ||
1077 | |||
1078 | if (opregion->acpi) { | ||
1079 | intel_didl_outputs(i915); | ||
1080 | intel_setup_cadls(i915); | ||
1081 | |||
1082 | /* | ||
1083 | * Notify the BIOS that we are ready to handle ACPI video extension |||
1084 | * notifications. Right now, all the events are handled by the ACPI |||
1085 | * video module; we don't actually need to do anything with them. |||
1086 | */ | ||
1087 | opregion->acpi->csts = 0; | ||
1088 | opregion->acpi->drdy = 1; | ||
1089 | } | ||
1090 | |||
1091 | if (opregion->asle) { | ||
1092 | opregion->asle->tche = ASLE_TCHE_BLC_EN; | ||
1093 | opregion->asle->ardy = ASLE_ARDY_READY; | ||
1094 | } | ||
1095 | |||
1096 | intel_opregion_notify_adapter(i915, PCI_D0); | ||
1097 | } | ||
1098 | |||
1099 | void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state) | ||
1100 | { | ||
1101 | struct intel_opregion *opregion = &i915->opregion; | ||
1102 | |||
1103 | if (!opregion->header) | ||
1104 | return; | ||
1105 | |||
1106 | intel_opregion_notify_adapter(i915, state); | ||
1107 | |||
1108 | if (opregion->asle) | ||
1109 | opregion->asle->ardy = ASLE_ARDY_NOT_READY; | ||
1110 | |||
1111 | cancel_work_sync(&i915->opregion.asle_work); | ||
1112 | |||
1113 | if (opregion->acpi) | ||
1114 | opregion->acpi->drdy = 0; | ||
1115 | } | ||
1116 | |||
1117 | void intel_opregion_unregister(struct drm_i915_private *i915) | ||
1118 | { | ||
1119 | struct intel_opregion *opregion = &i915->opregion; | ||
1120 | |||
1121 | intel_opregion_suspend(i915, PCI_D1); | ||
1122 | |||
1123 | if (!opregion->header) | ||
1124 | return; | ||
1125 | |||
1126 | if (opregion->acpi_notifier.notifier_call) { | ||
1127 | unregister_acpi_notifier(&opregion->acpi_notifier); | ||
1128 | opregion->acpi_notifier.notifier_call = NULL; | ||
1129 | } | ||
1130 | |||
1131 | /* just clear all opregion memory pointers now */ | ||
1132 | memunmap(opregion->header); | ||
1133 | if (opregion->rvda) { | ||
1134 | memunmap(opregion->rvda); | ||
1135 | opregion->rvda = NULL; | ||
1136 | } | ||
1137 | if (opregion->vbt_firmware) { | ||
1138 | kfree(opregion->vbt_firmware); | ||
1139 | opregion->vbt_firmware = NULL; | ||
1140 | } | ||
1141 | opregion->header = NULL; | ||
1142 | opregion->acpi = NULL; | ||
1143 | opregion->swsci = NULL; | ||
1144 | opregion->asle = NULL; | ||
1145 | opregion->vbt = NULL; | ||
1146 | opregion->lid_state = NULL; | ||
1147 | } | ||
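The reshuffled opregion code makes the lifecycle pairing explicit: register() now ends by calling resume(), and unregister() leads with a suspend to PCI_D1, so suspend/resume can also be driven on their own across power transitions. A toy model of the implied ordering (the standalone suspend/resume call sites are an assumption, not shown in this diff):

    #include <stdio.h>

    static void opregion_resume(void)  { printf("  drdy=1, ardy=READY, notify D0\n"); }
    static void opregion_suspend(void) { printf("  notify Dx, ardy=NOT_READY, drdy=0\n"); }

    static void opregion_register(void)
    {
            printf("register ACPI notifier\n");
            opregion_resume();              /* register implies resume */
    }

    static void opregion_unregister(void)
    {
            opregion_suspend();             /* unregister implies suspend (PCI_D1) */
            printf("unregister ACPI notifier, unmap opregion\n");
    }

    int main(void)
    {
            opregion_register();
            opregion_suspend();             /* e.g. system suspend (assumed) */
            opregion_resume();              /* e.g. system resume (assumed) */
            opregion_unregister();
            return 0;
    }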
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h index e8498a8cda3d..d84b6d2d2fae 100644 --- a/drivers/gpu/drm/i915/intel_opregion.h +++ b/drivers/gpu/drm/i915/intel_opregion.h | |||
@@ -57,8 +57,14 @@ struct intel_opregion { | |||
57 | #ifdef CONFIG_ACPI | 57 | #ifdef CONFIG_ACPI |
58 | 58 | ||
59 | int intel_opregion_setup(struct drm_i915_private *dev_priv); | 59 | int intel_opregion_setup(struct drm_i915_private *dev_priv); |
60 | |||
60 | void intel_opregion_register(struct drm_i915_private *dev_priv); | 61 | void intel_opregion_register(struct drm_i915_private *dev_priv); |
61 | void intel_opregion_unregister(struct drm_i915_private *dev_priv); | 62 | void intel_opregion_unregister(struct drm_i915_private *dev_priv); |
63 | |||
64 | void intel_opregion_resume(struct drm_i915_private *dev_priv); | ||
65 | void intel_opregion_suspend(struct drm_i915_private *dev_priv, | ||
66 | pci_power_t state); | ||
67 | |||
62 | void intel_opregion_asle_intr(struct drm_i915_private *dev_priv); | 68 | void intel_opregion_asle_intr(struct drm_i915_private *dev_priv); |
63 | int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, | 69 | int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, |
64 | bool enable); | 70 | bool enable); |
@@ -81,6 +87,15 @@ static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) | |||
81 | { | 87 | { |
82 | } | 88 | } |
83 | 89 | ||
90 | static inline void intel_opregion_resume(struct drm_i915_private *dev_priv) |||
91 | { | ||
92 | } | ||
93 | |||
94 | static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv, |||
95 | pci_power_t state) | ||
96 | { | ||
97 | } | ||
98 | |||
84 | static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) | 99 | static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv) |
85 | { | 100 | { |
86 | } | 101 | } |
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 72eb7e48e8bc..20ea7c99d13a 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c | |||
@@ -1338,7 +1338,7 @@ err_put_bo: | |||
1338 | return err; | 1338 | return err; |
1339 | } | 1339 | } |
1340 | 1340 | ||
1341 | void intel_setup_overlay(struct drm_i915_private *dev_priv) | 1341 | void intel_overlay_setup(struct drm_i915_private *dev_priv) |
1342 | { | 1342 | { |
1343 | struct intel_overlay *overlay; | 1343 | struct intel_overlay *overlay; |
1344 | int ret; | 1344 | int ret; |
@@ -1387,7 +1387,7 @@ out_free: | |||
1387 | kfree(overlay); | 1387 | kfree(overlay); |
1388 | } | 1388 | } |
1389 | 1389 | ||
1390 | void intel_cleanup_overlay(struct drm_i915_private *dev_priv) | 1390 | void intel_overlay_cleanup(struct drm_i915_private *dev_priv) |
1391 | { | 1391 | { |
1392 | struct intel_overlay *overlay; | 1392 | struct intel_overlay *overlay; |
1393 | 1393 | ||
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 4a9f139e7b73..e6cd7b55c018 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -111,7 +111,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc, | |||
111 | /* Native modes don't need fitting */ | 111 | /* Native modes don't need fitting */ |
112 | if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w && | 112 | if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w && |
113 | adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h && | 113 | adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h && |
114 | !pipe_config->ycbcr420) | 114 | pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420) |
115 | goto done; | 115 | goto done; |
116 | 116 | ||
117 | switch (fitting_mode) { | 117 | switch (fitting_mode) { |
@@ -505,7 +505,7 @@ static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe) | |||
505 | static u32 vlv_get_backlight(struct intel_connector *connector) | 505 | static u32 vlv_get_backlight(struct intel_connector *connector) |
506 | { | 506 | { |
507 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | 507 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
508 | enum pipe pipe = intel_get_pipe_from_connector(connector); | 508 | enum pipe pipe = intel_connector_get_pipe(connector); |
509 | 509 | ||
510 | return _vlv_get_backlight(dev_priv, pipe); | 510 | return _vlv_get_backlight(dev_priv, pipe); |
511 | } | 511 | } |
@@ -763,7 +763,7 @@ static void pwm_disable_backlight(const struct drm_connector_state *old_conn_sta | |||
763 | struct intel_panel *panel = &connector->panel; | 763 | struct intel_panel *panel = &connector->panel; |
764 | 764 | ||
765 | /* Disable the backlight */ | 765 | /* Disable the backlight */ |
766 | pwm_config(panel->backlight.pwm, 0, CRC_PMIC_PWM_PERIOD_NS); | 766 | intel_panel_actually_set_backlight(old_conn_state, 0); |
767 | usleep_range(2000, 3000); | 767 | usleep_range(2000, 3000); |
768 | pwm_disable(panel->backlight.pwm); | 768 | pwm_disable(panel->backlight.pwm); |
769 | } | 769 | } |
@@ -1814,11 +1814,8 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe) | |||
1814 | return 0; | 1814 | return 0; |
1815 | } | 1815 | } |
1816 | 1816 | ||
1817 | void intel_panel_destroy_backlight(struct drm_connector *connector) | 1817 | static void intel_panel_destroy_backlight(struct intel_panel *panel) |
1818 | { | 1818 | { |
1819 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
1820 | struct intel_panel *panel = &intel_connector->panel; | ||
1821 | |||
1822 | /* dispose of the pwm */ | 1819 | /* dispose of the pwm */ |
1823 | if (panel->backlight.pwm) | 1820 | if (panel->backlight.pwm) |
1824 | pwm_put(panel->backlight.pwm); | 1821 | pwm_put(panel->backlight.pwm); |
@@ -1923,6 +1920,8 @@ void intel_panel_fini(struct intel_panel *panel) | |||
1923 | struct intel_connector *intel_connector = | 1920 | struct intel_connector *intel_connector = |
1924 | container_of(panel, struct intel_connector, panel); | 1921 | container_of(panel, struct intel_connector, panel); |
1925 | 1922 | ||
1923 | intel_panel_destroy_backlight(panel); | ||
1924 | |||
1926 | if (panel->fixed_mode) | 1925 | if (panel->fixed_mode) |
1927 | drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode); | 1926 | drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode); |
1928 | 1927 | ||
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 1db9b8328275..897a791662c5 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -2493,6 +2493,9 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, | |||
2493 | uint32_t method1, method2; | 2493 | uint32_t method1, method2; |
2494 | int cpp; | 2494 | int cpp; |
2495 | 2495 | ||
2496 | if (mem_value == 0) | ||
2497 | return U32_MAX; | ||
2498 | |||
2496 | if (!intel_wm_plane_visible(cstate, pstate)) | 2499 | if (!intel_wm_plane_visible(cstate, pstate)) |
2497 | return 0; | 2500 | return 0; |
2498 | 2501 | ||
@@ -2522,6 +2525,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate, | |||
2522 | uint32_t method1, method2; | 2525 | uint32_t method1, method2; |
2523 | int cpp; | 2526 | int cpp; |
2524 | 2527 | ||
2528 | if (mem_value == 0) | ||
2529 | return U32_MAX; | ||
2530 | |||
2525 | if (!intel_wm_plane_visible(cstate, pstate)) | 2531 | if (!intel_wm_plane_visible(cstate, pstate)) |
2526 | return 0; | 2532 | return 0; |
2527 | 2533 | ||
@@ -2545,6 +2551,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate, | |||
2545 | { | 2551 | { |
2546 | int cpp; | 2552 | int cpp; |
2547 | 2553 | ||
2554 | if (mem_value == 0) | ||
2555 | return U32_MAX; | ||
2556 | |||
2548 | if (!intel_wm_plane_visible(cstate, pstate)) | 2557 | if (!intel_wm_plane_visible(cstate, pstate)) |
2549 | return 0; | 2558 | return 0; |
2550 | 2559 | ||
@@ -2881,8 +2890,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, | |||
2881 | * any underrun. If not able to get Dimm info assume 16GB dimm | 2890 | * any underrun. If not able to get Dimm info assume 16GB dimm |
2882 | * to avoid any underrun. | 2891 | * to avoid any underrun. |
2883 | */ | 2892 | */ |
2884 | if (!dev_priv->dram_info.valid_dimm || | 2893 | if (dev_priv->dram_info.is_16gb_dimm) |
2885 | dev_priv->dram_info.is_16gb_dimm) | ||
2886 | wm[0] += 1; | 2894 | wm[0] += 1; |
2887 | 2895 | ||
2888 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { | 2896 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
@@ -3009,6 +3017,34 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) | |||
3009 | intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); | 3017 | intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); |
3010 | } | 3018 | } |
3011 | 3019 | ||
3020 | static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) | ||
3021 | { | ||
3022 | /* | ||
3023 | * On some SNB machines (Thinkpad X220 Tablet at least) | ||
3024 | * LP3 usage can cause vblank interrupts to be lost. | ||
3025 | * The DEIIR bit will go high but it looks like the CPU | ||
3026 | * never gets interrupted. | ||
3027 | * | ||
3028 | * It's not clear whether other interrupt sources could |||
3029 | * be affected or if this is somehow limited to vblank | ||
3030 | * interrupts only. To play it safe we disable LP3 | ||
3031 | * watermarks entirely. | ||
3032 | */ | ||
3033 | if (dev_priv->wm.pri_latency[3] == 0 && | ||
3034 | dev_priv->wm.spr_latency[3] == 0 && | ||
3035 | dev_priv->wm.cur_latency[3] == 0) | ||
3036 | return; | ||
3037 | |||
3038 | dev_priv->wm.pri_latency[3] = 0; | ||
3039 | dev_priv->wm.spr_latency[3] = 0; | ||
3040 | dev_priv->wm.cur_latency[3] = 0; | ||
3041 | |||
3042 | DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n"); | ||
3043 | intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); | ||
3044 | intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); | ||
3045 | intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); | ||
3046 | } | ||
3047 | |||
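This quirk works together with the mem_value == 0 guards added earlier in this file: zeroing the level-3 latencies makes each watermark computation return U32_MAX, a value no FIFO allocation can satisfy, so LP3 should never pass validation again. A toy model of the interaction (sample latencies are made up):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t compute_wm(uint16_t mem_value)
    {
            if (mem_value == 0)
                    return UINT32_MAX; /* disabled level, can never validate */
            return mem_value * 2; /* stand-in for the real calculation */
    }

    int main(void)
    {
            uint16_t pri_latency[5] = { 2, 4, 8, 16, 32 }; /* sample values */

            pri_latency[3] = 0; /* the LP3 quirk */

            for (int level = 0; level < 5; level++)
                    printf("level %d wm: %u%s\n", level,
                           compute_wm(pri_latency[level]),
                           pri_latency[level] ? "" : " (disabled)");
            return 0;
    }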
3012 | static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) | 3048 | static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) |
3013 | { | 3049 | { |
3014 | intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency); | 3050 | intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency); |
@@ -3025,8 +3061,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) | |||
3025 | intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); | 3061 | intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); |
3026 | intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); | 3062 | intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); |
3027 | 3063 | ||
3028 | if (IS_GEN6(dev_priv)) | 3064 | if (IS_GEN6(dev_priv)) { |
3029 | snb_wm_latency_quirk(dev_priv); | 3065 | snb_wm_latency_quirk(dev_priv); |
3066 | snb_wm_lp3_irq_quirk(dev_priv); | ||
3067 | } | ||
3030 | } | 3068 | } |
3031 | 3069 | ||
3032 | static void skl_setup_wm_latency(struct drm_i915_private *dev_priv) | 3070 | static void skl_setup_wm_latency(struct drm_i915_private *dev_priv) |
@@ -3160,7 +3198,8 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev, | |||
3160 | * and after the vblank. | 3198 | * and after the vblank. |
3161 | */ | 3199 | */ |
3162 | *a = newstate->wm.ilk.optimal; | 3200 | *a = newstate->wm.ilk.optimal; |
3163 | if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base)) | 3201 | if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base) || |
3202 | intel_state->skip_intermediate_wm) | ||
3164 | return 0; | 3203 | return 0; |
3165 | 3204 | ||
3166 | a->pipe_enabled |= b->pipe_enabled; | 3205 | a->pipe_enabled |= b->pipe_enabled; |
@@ -3612,15 +3651,8 @@ static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state) | |||
3612 | static bool | 3651 | static bool |
3613 | intel_has_sagv(struct drm_i915_private *dev_priv) | 3652 | intel_has_sagv(struct drm_i915_private *dev_priv) |
3614 | { | 3653 | { |
3615 | if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || | 3654 | return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) && |
3616 | IS_CANNONLAKE(dev_priv)) | 3655 | dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED; |
3617 | return true; | ||
3618 | |||
3619 | if (IS_SKYLAKE(dev_priv) && | ||
3620 | dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED) | ||
3621 | return true; | ||
3622 | |||
3623 | return false; | ||
3624 | } | 3656 | } |
3625 | 3657 | ||
3626 | /* | 3658 | /* |
@@ -3784,7 +3816,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) | |||
3784 | 3816 | ||
3785 | static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, | 3817 | static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, |
3786 | const struct intel_crtc_state *cstate, | 3818 | const struct intel_crtc_state *cstate, |
3787 | const unsigned int total_data_rate, | 3819 | const u64 total_data_rate, |
3788 | const int num_active, | 3820 | const int num_active, |
3789 | struct skl_ddb_allocation *ddb) | 3821 | struct skl_ddb_allocation *ddb) |
3790 | { | 3822 | { |
@@ -3798,12 +3830,12 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, | |||
3798 | return ddb_size - 4; /* 4 blocks for bypass path allocation */ | 3830 | return ddb_size - 4; /* 4 blocks for bypass path allocation */ |
3799 | 3831 | ||
3800 | adjusted_mode = &cstate->base.adjusted_mode; | 3832 | adjusted_mode = &cstate->base.adjusted_mode; |
3801 | total_data_bw = (u64)total_data_rate * drm_mode_vrefresh(adjusted_mode); | 3833 | total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode); |
3802 | 3834 | ||
3803 | /* | 3835 | /* |
3804 | * 12GB/s is maximum BW supported by single DBuf slice. | 3836 | * 12GB/s is maximum BW supported by single DBuf slice. |
3805 | */ | 3837 | */ |
3806 | if (total_data_bw >= GBps(12) || num_active > 1) { | 3838 | if (num_active > 1 || total_data_bw >= GBps(12)) { |
3807 | ddb->enabled_slices = 2; | 3839 | ddb->enabled_slices = 2; |
3808 | } else { | 3840 | } else { |
3809 | ddb->enabled_slices = 1; | 3841 | ddb->enabled_slices = 1; |
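The switch to u64 matters for exactly this comparison: with cpp now folded into the rate, total_data_rate * vrefresh overflows 32 bits well before the 12 GB/s slice limit. Three 4K ARGB planes at 60 Hz already give about 5.97 GB/s, past 2^32 but still one slice. A quick standalone check (GBps() is assumed to mean decimal gigabytes per second):

    #include <stdint.h>
    #include <stdio.h>

    #define GBps(x) ((uint64_t)(x) * 1000000000ULL) /* assumed definition */

    int main(void)
    {
            uint64_t total_data_rate = 3ULL * 3840 * 2160 * 4; /* 3 ARGB 4K planes */
            uint64_t total_data_bw = total_data_rate * 60;     /* 60 Hz refresh */

            printf("bw = %llu B/s (32-bit max %llu)\n",
                   (unsigned long long)total_data_bw,
                   (unsigned long long)UINT32_MAX);
            printf("enabled_slices = %d\n", total_data_bw >= GBps(12) ? 2 : 1);
            return 0;
    }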
@@ -3814,16 +3846,15 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv, | |||
3814 | } | 3846 | } |
3815 | 3847 | ||
3816 | static void | 3848 | static void |
3817 | skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, | 3849 | skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, |
3818 | const struct intel_crtc_state *cstate, | 3850 | const struct intel_crtc_state *cstate, |
3819 | const unsigned int total_data_rate, | 3851 | const u64 total_data_rate, |
3820 | struct skl_ddb_allocation *ddb, | 3852 | struct skl_ddb_allocation *ddb, |
3821 | struct skl_ddb_entry *alloc, /* out */ | 3853 | struct skl_ddb_entry *alloc, /* out */ |
3822 | int *num_active /* out */) | 3854 | int *num_active /* out */) |
3823 | { | 3855 | { |
3824 | struct drm_atomic_state *state = cstate->base.state; | 3856 | struct drm_atomic_state *state = cstate->base.state; |
3825 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); | 3857 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); |
3826 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
3827 | struct drm_crtc *for_crtc = cstate->base.crtc; | 3858 | struct drm_crtc *for_crtc = cstate->base.crtc; |
3828 | const struct drm_crtc_state *crtc_state; | 3859 | const struct drm_crtc_state *crtc_state; |
3829 | const struct drm_crtc *crtc; | 3860 | const struct drm_crtc *crtc; |
@@ -3945,14 +3976,9 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv, | |||
3945 | val & PLANE_CTL_ALPHA_MASK); | 3976 | val & PLANE_CTL_ALPHA_MASK); |
3946 | 3977 | ||
3947 | val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); | 3978 | val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); |
3948 | /* | 3979 | if (fourcc == DRM_FORMAT_NV12 && INTEL_GEN(dev_priv) < 11) { |
3949 | * FIXME: add proper NV12 support for ICL. Avoid reading unclaimed | ||
3950 | * registers for now. | ||
3951 | */ | ||
3952 | if (INTEL_GEN(dev_priv) < 11) | ||
3953 | val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id)); | 3980 | val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id)); |
3954 | 3981 | ||
3955 | if (fourcc == DRM_FORMAT_NV12) { | ||
3956 | skl_ddb_entry_init_from_hw(dev_priv, | 3982 | skl_ddb_entry_init_from_hw(dev_priv, |
3957 | &ddb->plane[pipe][plane_id], val2); | 3983 | &ddb->plane[pipe][plane_id], val2); |
3958 | skl_ddb_entry_init_from_hw(dev_priv, | 3984 | skl_ddb_entry_init_from_hw(dev_priv, |
@@ -4139,23 +4165,24 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, | |||
4139 | return 0; | 4165 | return 0; |
4140 | } | 4166 | } |
4141 | 4167 | ||
4142 | static unsigned int | 4168 | static u64 |
4143 | skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, | 4169 | skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, |
4144 | const struct drm_plane_state *pstate, | 4170 | const struct intel_plane_state *intel_pstate, |
4145 | const int plane) | 4171 | const int plane) |
4146 | { | 4172 | { |
4147 | struct intel_plane *intel_plane = to_intel_plane(pstate->plane); | 4173 | struct intel_plane *intel_plane = |
4148 | struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); | 4174 | to_intel_plane(intel_pstate->base.plane); |
4149 | uint32_t data_rate; | 4175 | uint32_t data_rate; |
4150 | uint32_t width = 0, height = 0; | 4176 | uint32_t width = 0, height = 0; |
4151 | struct drm_framebuffer *fb; | 4177 | struct drm_framebuffer *fb; |
4152 | u32 format; | 4178 | u32 format; |
4153 | uint_fixed_16_16_t down_scale_amount; | 4179 | uint_fixed_16_16_t down_scale_amount; |
4180 | u64 rate; | ||
4154 | 4181 | ||
4155 | if (!intel_pstate->base.visible) | 4182 | if (!intel_pstate->base.visible) |
4156 | return 0; | 4183 | return 0; |
4157 | 4184 | ||
4158 | fb = pstate->fb; | 4185 | fb = intel_pstate->base.fb; |
4159 | format = fb->format->format; | 4186 | format = fb->format->format; |
4160 | 4187 | ||
4161 | if (intel_plane->id == PLANE_CURSOR) | 4188 | if (intel_plane->id == PLANE_CURSOR) |
@@ -4177,28 +4204,26 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, | |||
4177 | height /= 2; | 4204 | height /= 2; |
4178 | } | 4205 | } |
4179 | 4206 | ||
4180 | data_rate = width * height * fb->format->cpp[plane]; | 4207 | data_rate = width * height; |
4181 | 4208 | ||
4182 | down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate); | 4209 | down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate); |
4183 | 4210 | ||
4184 | return mul_round_up_u32_fixed16(data_rate, down_scale_amount); | 4211 | rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount); |
4212 | |||
4213 | rate *= fb->format->cpp[plane]; | ||
4214 | return rate; | ||
4185 | } | 4215 | } |
4186 | 4216 | ||
4187 | /* | 4217 | static u64 |
4188 | * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching | ||
4189 | * a 8192x4096@32bpp framebuffer: | ||
4190 | * 3 * 4096 * 8192 * 4 < 2^32 | ||
4191 | */ | ||
4192 | static unsigned int | ||
4193 | skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, | 4218 | skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, |
4194 | unsigned int *plane_data_rate, | 4219 | u64 *plane_data_rate, |
4195 | unsigned int *uv_plane_data_rate) | 4220 | u64 *uv_plane_data_rate) |
4196 | { | 4221 | { |
4197 | struct drm_crtc_state *cstate = &intel_cstate->base; | 4222 | struct drm_crtc_state *cstate = &intel_cstate->base; |
4198 | struct drm_atomic_state *state = cstate->state; | 4223 | struct drm_atomic_state *state = cstate->state; |
4199 | struct drm_plane *plane; | 4224 | struct drm_plane *plane; |
4200 | const struct drm_plane_state *pstate; | 4225 | const struct drm_plane_state *pstate; |
4201 | unsigned int total_data_rate = 0; | 4226 | u64 total_data_rate = 0; |
4202 | 4227 | ||
4203 | if (WARN_ON(!state)) | 4228 | if (WARN_ON(!state)) |
4204 | return 0; | 4229 | return 0; |
@@ -4206,26 +4231,81 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, | |||
4206 | /* Calculate and cache data rate for each plane */ | 4231 | /* Calculate and cache data rate for each plane */ |
4207 | drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) { | 4232 | drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) { |
4208 | enum plane_id plane_id = to_intel_plane(plane)->id; | 4233 | enum plane_id plane_id = to_intel_plane(plane)->id; |
4209 | unsigned int rate; | 4234 | u64 rate; |
4235 | const struct intel_plane_state *intel_pstate = | ||
4236 | to_intel_plane_state(pstate); | ||
4210 | 4237 | ||
4211 | /* packed/y */ | 4238 | /* packed/y */ |
4212 | rate = skl_plane_relative_data_rate(intel_cstate, | 4239 | rate = skl_plane_relative_data_rate(intel_cstate, |
4213 | pstate, 0); | 4240 | intel_pstate, 0); |
4214 | plane_data_rate[plane_id] = rate; | 4241 | plane_data_rate[plane_id] = rate; |
4215 | |||
4216 | total_data_rate += rate; | 4242 | total_data_rate += rate; |
4217 | 4243 | ||
4218 | /* uv-plane */ | 4244 | /* uv-plane */ |
4219 | rate = skl_plane_relative_data_rate(intel_cstate, | 4245 | rate = skl_plane_relative_data_rate(intel_cstate, |
4220 | pstate, 1); | 4246 | intel_pstate, 1); |
4221 | uv_plane_data_rate[plane_id] = rate; | 4247 | uv_plane_data_rate[plane_id] = rate; |
4222 | |||
4223 | total_data_rate += rate; | 4248 | total_data_rate += rate; |
4224 | } | 4249 | } |
4225 | 4250 | ||
4226 | return total_data_rate; | 4251 | return total_data_rate; |
4227 | } | 4252 | } |
4228 | 4253 | ||
4254 | static u64 | ||
4255 | icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate, | ||
4256 | u64 *plane_data_rate) | ||
4257 | { | ||
4258 | struct drm_crtc_state *cstate = &intel_cstate->base; | ||
4259 | struct drm_atomic_state *state = cstate->state; | ||
4260 | struct drm_plane *plane; | ||
4261 | const struct drm_plane_state *pstate; | ||
4262 | u64 total_data_rate = 0; | ||
4263 | |||
4264 | if (WARN_ON(!state)) | ||
4265 | return 0; | ||
4266 | |||
4267 | /* Calculate and cache data rate for each plane */ | ||
4268 | drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) { | ||
4269 | const struct intel_plane_state *intel_pstate = | ||
4270 | to_intel_plane_state(pstate); | ||
4271 | enum plane_id plane_id = to_intel_plane(plane)->id; | ||
4272 | u64 rate; | ||
4273 | |||
4274 | if (!intel_pstate->linked_plane) { | ||
4275 | rate = skl_plane_relative_data_rate(intel_cstate, | ||
4276 | intel_pstate, 0); | ||
4277 | plane_data_rate[plane_id] = rate; | ||
4278 | total_data_rate += rate; | ||
4279 | } else { | ||
4280 | enum plane_id y_plane_id; | ||
4281 | |||
4282 | /* | ||
4283 | * The slave plane might not be iterated by |||
4284 | * drm_atomic_crtc_state_for_each_plane_state(), |||
4285 | * and it needs the master plane state, which may |||
4286 | * be NULL if fetched via get_new_plane_state(), so |||
4287 | * we always calculate from the master. |||
4288 | */ | ||
4289 | if (intel_pstate->slave) | ||
4290 | continue; | ||
4291 | |||
4292 | /* The Y plane's rate is credited to the slave's plane id */ |||
4293 | rate = skl_plane_relative_data_rate(intel_cstate, | ||
4294 | intel_pstate, 0); | ||
4295 | y_plane_id = intel_pstate->linked_plane->id; | ||
4296 | plane_data_rate[y_plane_id] = rate; | ||
4297 | total_data_rate += rate; | ||
4298 | |||
4299 | rate = skl_plane_relative_data_rate(intel_cstate, | ||
4300 | intel_pstate, 1); | ||
4301 | plane_data_rate[plane_id] = rate; | ||
4302 | total_data_rate += rate; | ||
4303 | } | ||
4304 | } | ||
4305 | |||
4306 | return total_data_rate; | ||
4307 | } | ||
4308 | |||
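On gen11, NV12 spans two hardware planes: the master scans out the UV data while a linked slave scans out Y, and as the comments above note, slaves are skipped during iteration while the master books both rates, crediting the Y rate to the slave's plane id. A minimal model of that bookkeeping (struct layout and sample rates are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct plane_state {
            int id;
            int linked_id; /* -1 if not linked */
            int is_slave;
            uint64_t y_rate, uv_rate;
    };

    int main(void)
    {
            /* master (UV) on plane 0, linked Y slave on plane 5 */
            struct plane_state planes[] = {
                    { .id = 0, .linked_id = 5, .is_slave = 0,
                      .y_rate = 100, .uv_rate = 50 },
                    { .id = 5, .linked_id = 0, .is_slave = 1 },
            };
            uint64_t rate[8] = { 0 }, total = 0;

            for (int i = 0; i < 2; i++) {
                    const struct plane_state *p = &planes[i];

                    if (p->is_slave)
                            continue; /* always calculated from the master */

                    if (p->linked_id < 0) {
                            rate[p->id] = p->y_rate;
                            total += p->y_rate;
                    } else {
                            rate[p->linked_id] = p->y_rate; /* Y -> slave id */
                            rate[p->id] = p->uv_rate;       /* UV -> master id */
                            total += p->y_rate + p->uv_rate;
                    }
            }
            printf("rate[0]=%llu rate[5]=%llu total=%llu\n",
                   (unsigned long long)rate[0],
                   (unsigned long long)rate[5],
                   (unsigned long long)total);
            return 0;
    }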
4229 | static uint16_t | 4309 | static uint16_t |
4230 | skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane) | 4310 | skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane) |
4231 | { | 4311 | { |
@@ -4298,15 +4378,25 @@ skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active, | |||
4298 | 4378 | ||
4299 | drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) { | 4379 | drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) { |
4300 | enum plane_id plane_id = to_intel_plane(plane)->id; | 4380 | enum plane_id plane_id = to_intel_plane(plane)->id; |
4381 | struct intel_plane_state *plane_state = to_intel_plane_state(pstate); | ||
4301 | 4382 | ||
4302 | if (plane_id == PLANE_CURSOR) | 4383 | if (plane_id == PLANE_CURSOR) |
4303 | continue; | 4384 | continue; |
4304 | 4385 | ||
4305 | if (!pstate->visible) | 4386 | /* slave plane must be invisible and calculated from master */ |
4387 | if (!pstate->visible || WARN_ON(plane_state->slave)) | ||
4306 | continue; | 4388 | continue; |
4307 | 4389 | ||
4308 | minimum[plane_id] = skl_ddb_min_alloc(pstate, 0); | 4390 | if (!plane_state->linked_plane) { |
4309 | uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1); | 4391 | minimum[plane_id] = skl_ddb_min_alloc(pstate, 0); |
4392 | uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1); | ||
4393 | } else { | ||
4394 | enum plane_id y_plane_id = | ||
4395 | plane_state->linked_plane->id; | ||
4396 | |||
4397 | minimum[y_plane_id] = skl_ddb_min_alloc(pstate, 0); | ||
4398 | minimum[plane_id] = skl_ddb_min_alloc(pstate, 1); | ||
4399 | } | ||
4310 | } | 4400 | } |
4311 | 4401 | ||
4312 | minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active); | 4402 | minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active); |
@@ -4318,18 +4408,18 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
4318 | { | 4408 | { |
4319 | struct drm_atomic_state *state = cstate->base.state; | 4409 | struct drm_atomic_state *state = cstate->base.state; |
4320 | struct drm_crtc *crtc = cstate->base.crtc; | 4410 | struct drm_crtc *crtc = cstate->base.crtc; |
4321 | struct drm_device *dev = crtc->dev; | 4411 | struct drm_i915_private *dev_priv = to_i915(crtc->dev); |
4322 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 4412 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
4323 | enum pipe pipe = intel_crtc->pipe; | 4413 | enum pipe pipe = intel_crtc->pipe; |
4324 | struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb; | 4414 | struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb; |
4325 | uint16_t alloc_size, start; | 4415 | uint16_t alloc_size, start; |
4326 | uint16_t minimum[I915_MAX_PLANES] = {}; | 4416 | uint16_t minimum[I915_MAX_PLANES] = {}; |
4327 | uint16_t uv_minimum[I915_MAX_PLANES] = {}; | 4417 | uint16_t uv_minimum[I915_MAX_PLANES] = {}; |
4328 | unsigned int total_data_rate; | 4418 | u64 total_data_rate; |
4329 | enum plane_id plane_id; | 4419 | enum plane_id plane_id; |
4330 | int num_active; | 4420 | int num_active; |
4331 | unsigned int plane_data_rate[I915_MAX_PLANES] = {}; | 4421 | u64 plane_data_rate[I915_MAX_PLANES] = {}; |
4332 | unsigned int uv_plane_data_rate[I915_MAX_PLANES] = {}; | 4422 | u64 uv_plane_data_rate[I915_MAX_PLANES] = {}; |
4333 | uint16_t total_min_blocks = 0; | 4423 | uint16_t total_min_blocks = 0; |
4334 | 4424 | ||
4335 | /* Clear the partitioning for disabled planes. */ | 4425 | /* Clear the partitioning for disabled planes. */ |
@@ -4344,11 +4434,18 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
4344 | return 0; | 4434 | return 0; |
4345 | } | 4435 | } |
4346 | 4436 | ||
4347 | total_data_rate = skl_get_total_relative_data_rate(cstate, | 4437 | if (INTEL_GEN(dev_priv) < 11) |
4348 | plane_data_rate, | 4438 | total_data_rate = |
4349 | uv_plane_data_rate); | 4439 | skl_get_total_relative_data_rate(cstate, |
4350 | skl_ddb_get_pipe_allocation_limits(dev, cstate, total_data_rate, ddb, | 4440 | plane_data_rate, |
4351 | alloc, &num_active); | 4441 | uv_plane_data_rate); |
4442 | else | ||
4443 | total_data_rate = | ||
4444 | icl_get_total_relative_data_rate(cstate, | ||
4445 | plane_data_rate); | ||
4446 | |||
4447 | skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate, | ||
4448 | ddb, alloc, &num_active); | ||
4352 | alloc_size = skl_ddb_entry_size(alloc); | 4449 | alloc_size = skl_ddb_entry_size(alloc); |
4353 | if (alloc_size == 0) | 4450 | if (alloc_size == 0) |
4354 | return 0; | 4451 | return 0; |
@@ -4388,7 +4485,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
4388 | 4485 | ||
4389 | start = alloc->start; | 4486 | start = alloc->start; |
4390 | for_each_plane_id_on_crtc(intel_crtc, plane_id) { | 4487 | for_each_plane_id_on_crtc(intel_crtc, plane_id) { |
4391 | unsigned int data_rate, uv_data_rate; | 4488 | u64 data_rate, uv_data_rate; |
4392 | uint16_t plane_blocks, uv_plane_blocks; | 4489 | uint16_t plane_blocks, uv_plane_blocks; |
4393 | 4490 | ||
4394 | if (plane_id == PLANE_CURSOR) | 4491 | if (plane_id == PLANE_CURSOR) |
@@ -4402,8 +4499,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
4402 | * result is < available as data_rate / total_data_rate < 1 | 4499 | * result is < available as data_rate / total_data_rate < 1 |
4403 | */ | 4500 | */ |
4404 | plane_blocks = minimum[plane_id]; | 4501 | plane_blocks = minimum[plane_id]; |
4405 | plane_blocks += div_u64((uint64_t)alloc_size * data_rate, | 4502 | plane_blocks += div64_u64(alloc_size * data_rate, total_data_rate); |
4406 | total_data_rate); | ||
4407 | 4503 | ||
4408 | /* Leave disabled planes at (0,0) */ | 4504 | /* Leave disabled planes at (0,0) */ |
4409 | if (data_rate) { | 4505 | if (data_rate) { |
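
The allocation step above hands each plane its guaranteed minimum plus a share of the pipe's DDB proportional to its data rate; moving the rates to u64 and dividing with div64_u64() keeps the multiply from overflowing. A worked sketch with hypothetical numbers:

    /*
     * Sketch of the proportional DDB split above, with hypothetical
     * numbers. Each plane gets its guaranteed minimum plus a share of
     * the pipe's blocks proportional to data_rate / total_data_rate;
     * the 64-bit divide mirrors div64_u64() now that rates are u64.
     */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t alloc_size = 512;           /* blocks in this pipe's DDB */
        uint64_t data_rate = 4096000;        /* this plane */
        uint64_t total_data_rate = 10240000; /* all planes on the pipe */
        uint16_t minimum = 8;

        uint16_t plane_blocks = minimum +
            (uint16_t)(alloc_size * data_rate / total_data_rate);

        printf("plane_blocks = %u\n", plane_blocks); /* 8 + 204 = 212 */
        return 0;
    }

With a 512-block allocation and a 40% share the plane ends up with 8 + 204 = 212 blocks; since the divide truncates, the per-plane shares always round down and their sum never exceeds alloc_size.
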
@@ -4417,8 +4513,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
4417 | uv_data_rate = uv_plane_data_rate[plane_id]; | 4513 | uv_data_rate = uv_plane_data_rate[plane_id]; |
4418 | 4514 | ||
4419 | uv_plane_blocks = uv_minimum[plane_id]; | 4515 | uv_plane_blocks = uv_minimum[plane_id]; |
4420 | uv_plane_blocks += div_u64((uint64_t)alloc_size * uv_data_rate, | 4516 | uv_plane_blocks += div64_u64(alloc_size * uv_data_rate, total_data_rate); |
4421 | total_data_rate); | 4517 | |
4518 | /* Gen11+ uses a separate plane for UV watermarks */ | ||
4519 | WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks); | ||
4422 | 4520 | ||
4423 | if (uv_data_rate) { | 4521 | if (uv_data_rate) { |
4424 | ddb->uv_plane[pipe][plane_id].start = start; | 4522 | ddb->uv_plane[pipe][plane_id].start = start; |
@@ -4476,7 +4574,7 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate, | |||
4476 | } | 4574 | } |
4477 | 4575 | ||
4478 | static uint_fixed_16_16_t | 4576 | static uint_fixed_16_16_t |
4479 | intel_get_linetime_us(struct intel_crtc_state *cstate) | 4577 | intel_get_linetime_us(const struct intel_crtc_state *cstate) |
4480 | { | 4578 | { |
4481 | uint32_t pixel_rate; | 4579 | uint32_t pixel_rate; |
4482 | uint32_t crtc_htotal; | 4580 | uint32_t crtc_htotal; |
@@ -4520,7 +4618,7 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate, | |||
4520 | 4618 | ||
4521 | static int | 4619 | static int |
4522 | skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, | 4620 | skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, |
4523 | struct intel_crtc_state *cstate, | 4621 | const struct intel_crtc_state *cstate, |
4524 | const struct intel_plane_state *intel_pstate, | 4622 | const struct intel_plane_state *intel_pstate, |
4525 | struct skl_wm_params *wp, int plane_id) | 4623 | struct skl_wm_params *wp, int plane_id) |
4526 | { | 4624 | { |
@@ -4627,7 +4725,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, | |||
4627 | } | 4725 | } |
4628 | 4726 | ||
4629 | static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | 4727 | static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, |
4630 | struct intel_crtc_state *cstate, | 4728 | const struct intel_crtc_state *cstate, |
4631 | const struct intel_plane_state *intel_pstate, | 4729 | const struct intel_plane_state *intel_pstate, |
4632 | uint16_t ddb_allocation, | 4730 | uint16_t ddb_allocation, |
4633 | int level, | 4731 | int level, |
@@ -4672,15 +4770,24 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | |||
4672 | } else { | 4770 | } else { |
4673 | if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal / | 4771 | if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal / |
4674 | wp->dbuf_block_size < 1) && | 4772 | wp->dbuf_block_size < 1) && |
4675 | (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) | 4773 | (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) { |
4676 | selected_result = method2; | 4774 | selected_result = method2; |
4677 | else if (ddb_allocation >= | 4775 | } else if (ddb_allocation >= |
4678 | fixed16_to_u32_round_up(wp->plane_blocks_per_line)) | 4776 | fixed16_to_u32_round_up(wp->plane_blocks_per_line)) { |
4679 | selected_result = min_fixed16(method1, method2); | 4777 | if (IS_GEN9(dev_priv) && |
4680 | else if (latency >= wp->linetime_us) | 4778 | !IS_GEMINILAKE(dev_priv)) |
4681 | selected_result = min_fixed16(method1, method2); | 4779 | selected_result = min_fixed16(method1, method2); |
4682 | else | 4780 | else |
4781 | selected_result = method2; | ||
4782 | } else if (latency >= wp->linetime_us) { | ||
4783 | if (IS_GEN9(dev_priv) && | ||
4784 | !IS_GEMINILAKE(dev_priv)) | ||
4785 | selected_result = min_fixed16(method1, method2); | ||
4786 | else | ||
4787 | selected_result = method2; | ||
4788 | } else { | ||
4683 | selected_result = method1; | 4789 | selected_result = method1; |
4790 | } | ||
4684 | } | 4791 | } |
4685 | 4792 | ||
4686 | res_blocks = fixed16_to_u32_round_up(selected_result) + 1; | 4793 | res_blocks = fixed16_to_u32_round_up(selected_result) + 1; |
@@ -4756,17 +4863,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | |||
4756 | } | 4863 | } |
4757 | } | 4864 | } |
4758 | 4865 | ||
4759 | /* | ||
4760 | * Display WA #826 (SKL:ALL, BXT:ALL) & #1059 (CNL:A) | ||
4761 | * disable wm level 1-7 on NV12 planes | ||
4762 | */ | ||
4763 | if (wp->is_planar && level >= 1 && | ||
4764 | (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) || | ||
4765 | IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))) { | ||
4766 | result->plane_en = false; | ||
4767 | return 0; | ||
4768 | } | ||
4769 | |||
4770 | /* The number of lines are ignored for the level 0 watermark. */ | 4866 | /* The number of lines are ignored for the level 0 watermark. */ |
4771 | result->plane_res_b = res_blocks; | 4867 | result->plane_res_b = res_blocks; |
4772 | result->plane_res_l = res_lines; | 4868 | result->plane_res_l = res_lines; |
@@ -4778,38 +4874,22 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, | |||
4778 | static int | 4874 | static int |
4779 | skl_compute_wm_levels(const struct drm_i915_private *dev_priv, | 4875 | skl_compute_wm_levels(const struct drm_i915_private *dev_priv, |
4780 | struct skl_ddb_allocation *ddb, | 4876 | struct skl_ddb_allocation *ddb, |
4781 | struct intel_crtc_state *cstate, | 4877 | const struct intel_crtc_state *cstate, |
4782 | const struct intel_plane_state *intel_pstate, | 4878 | const struct intel_plane_state *intel_pstate, |
4879 | uint16_t ddb_blocks, | ||
4783 | const struct skl_wm_params *wm_params, | 4880 | const struct skl_wm_params *wm_params, |
4784 | struct skl_plane_wm *wm, | 4881 | struct skl_plane_wm *wm, |
4785 | int plane_id) | 4882 | struct skl_wm_level *levels) |
4786 | { | 4883 | { |
4787 | struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); | ||
4788 | struct drm_plane *plane = intel_pstate->base.plane; | ||
4789 | struct intel_plane *intel_plane = to_intel_plane(plane); | ||
4790 | uint16_t ddb_blocks; | ||
4791 | enum pipe pipe = intel_crtc->pipe; | ||
4792 | int level, max_level = ilk_wm_max_level(dev_priv); | 4884 | int level, max_level = ilk_wm_max_level(dev_priv); |
4793 | enum plane_id intel_plane_id = intel_plane->id; | 4885 | struct skl_wm_level *result_prev = &levels[0]; |
4794 | int ret; | 4886 | int ret; |
4795 | 4887 | ||
4796 | if (WARN_ON(!intel_pstate->base.fb)) | 4888 | if (WARN_ON(!intel_pstate->base.fb)) |
4797 | return -EINVAL; | 4889 | return -EINVAL; |
4798 | 4890 | ||
4799 | ddb_blocks = plane_id ? | ||
4800 | skl_ddb_entry_size(&ddb->uv_plane[pipe][intel_plane_id]) : | ||
4801 | skl_ddb_entry_size(&ddb->plane[pipe][intel_plane_id]); | ||
4802 | |||
4803 | for (level = 0; level <= max_level; level++) { | 4891 | for (level = 0; level <= max_level; level++) { |
4804 | struct skl_wm_level *result = plane_id ? &wm->uv_wm[level] : | 4892 | struct skl_wm_level *result = &levels[level]; |
4805 | &wm->wm[level]; | ||
4806 | struct skl_wm_level *result_prev; | ||
4807 | |||
4808 | if (level) | ||
4809 | result_prev = plane_id ? &wm->uv_wm[level - 1] : | ||
4810 | &wm->wm[level - 1]; | ||
4811 | else | ||
4812 | result_prev = plane_id ? &wm->uv_wm[0] : &wm->wm[0]; | ||
4813 | 4893 | ||
4814 | ret = skl_compute_plane_wm(dev_priv, | 4894 | ret = skl_compute_plane_wm(dev_priv, |
4815 | cstate, | 4895 | cstate, |
@@ -4821,6 +4901,8 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv, | |||
4821 | result); | 4901 | result); |
4822 | if (ret) | 4902 | if (ret) |
4823 | return ret; | 4903 | return ret; |
4904 | |||
4905 | result_prev = result; | ||
4824 | } | 4906 | } |
4825 | 4907 | ||
4826 | if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12) | 4908 | if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12) |
@@ -4830,7 +4912,7 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv, | |||
4830 | } | 4912 | } |
4831 | 4913 | ||
4832 | static uint32_t | 4914 | static uint32_t |
4833 | skl_compute_linetime_wm(struct intel_crtc_state *cstate) | 4915 | skl_compute_linetime_wm(const struct intel_crtc_state *cstate) |
4834 | { | 4916 | { |
4835 | struct drm_atomic_state *state = cstate->base.state; | 4917 | struct drm_atomic_state *state = cstate->base.state; |
4836 | struct drm_i915_private *dev_priv = to_i915(state->dev); | 4918 | struct drm_i915_private *dev_priv = to_i915(state->dev); |
@@ -4852,7 +4934,7 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate) | |||
4852 | return linetime_wm; | 4934 | return linetime_wm; |
4853 | } | 4935 | } |
4854 | 4936 | ||
4855 | static void skl_compute_transition_wm(struct intel_crtc_state *cstate, | 4937 | static void skl_compute_transition_wm(const struct intel_crtc_state *cstate, |
4856 | struct skl_wm_params *wp, | 4938 | struct skl_wm_params *wp, |
4857 | struct skl_wm_level *wm_l0, | 4939 | struct skl_wm_level *wm_l0, |
4858 | uint16_t ddb_allocation, | 4940 | uint16_t ddb_allocation, |
@@ -4862,7 +4944,7 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate, | |||
4862 | const struct drm_i915_private *dev_priv = to_i915(dev); | 4944 | const struct drm_i915_private *dev_priv = to_i915(dev); |
4863 | uint16_t trans_min, trans_y_tile_min; | 4945 | uint16_t trans_min, trans_y_tile_min; |
4864 | const uint16_t trans_amount = 10; /* This is a configurable amount */ | 4946 | const uint16_t trans_amount = 10; /* This is a configurable amount */ |
4865 | uint16_t trans_offset_b, res_blocks; | 4947 | uint16_t wm0_sel_res_b, trans_offset_b, res_blocks; |
4866 | 4948 | ||
4867 | if (!cstate->base.active) | 4949 | if (!cstate->base.active) |
4868 | goto exit; | 4950 | goto exit; |
@@ -4875,19 +4957,31 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate, | |||
4875 | if (!dev_priv->ipc_enabled) | 4957 | if (!dev_priv->ipc_enabled) |
4876 | goto exit; | 4958 | goto exit; |
4877 | 4959 | ||
4878 | trans_min = 0; | 4960 | trans_min = 14; |
4879 | if (INTEL_GEN(dev_priv) >= 10) | 4961 | if (INTEL_GEN(dev_priv) >= 11) |
4880 | trans_min = 4; | 4962 | trans_min = 4; |
4881 | 4963 | ||
4882 | trans_offset_b = trans_min + trans_amount; | 4964 | trans_offset_b = trans_min + trans_amount; |
4883 | 4965 | ||
4966 | /* | ||
4967 | * The spec asks for Selected Result Blocks for wm0 (the real value), | ||
4968 | * not Result Blocks (the integer value). Pay attention to the capital | ||
4969 | * letters. The value wm_l0->plane_res_b is actually Result Blocks, but | ||
4970 | * since Result Blocks is the ceiling of Selected Result Blocks plus 1, | ||
4971 | * and since we later will have to get the ceiling of the sum in the | ||
4972 | * transition watermarks calculation, we can just pretend Selected | ||
4973 | * Result Blocks is Result Blocks minus 1 and it should work for the | ||
4974 | * current platforms. | ||
4975 | */ | ||
4976 | wm0_sel_res_b = wm_l0->plane_res_b - 1; | ||
4977 | |||
4884 | if (wp->y_tiled) { | 4978 | if (wp->y_tiled) { |
4885 | trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2, | 4979 | trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2, |
4886 | wp->y_tile_minimum); | 4980 | wp->y_tile_minimum); |
4887 | res_blocks = max(wm_l0->plane_res_b, trans_y_tile_min) + | 4981 | res_blocks = max(wm0_sel_res_b, trans_y_tile_min) + |
4888 | trans_offset_b; | 4982 | trans_offset_b; |
4889 | } else { | 4983 | } else { |
4890 | res_blocks = wm_l0->plane_res_b + trans_offset_b; | 4984 | res_blocks = wm0_sel_res_b + trans_offset_b; |
4891 | 4985 | ||
4892 | /* WA BUG:1938466 add one block for non y-tile planes */ | 4986 | /* WA BUG:1938466 add one block for non y-tile planes */ |
4893 | if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0)) | 4987 | if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0)) |
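
The comment above justifies treating Selected Result Blocks as plane_res_b - 1; plugging hypothetical numbers through the non y-tiled branch makes the arithmetic concrete:

    /*
     * Worked example of the transition watermark arithmetic above
     * (hypothetical level-0 result, non y-tiled, pre-gen11 trans_min).
     * plane_res_b is Result Blocks, i.e. ceil(Selected Result Blocks)
     * plus 1, so Selected Result Blocks is approximated as
     * plane_res_b - 1.
     */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t trans_min = 14;          /* 4 on gen11+ */
        const uint16_t trans_amount = 10; /* configurable */
        uint16_t plane_res_b = 42;        /* hypothetical wm0 Result Blocks */

        uint16_t trans_offset_b = trans_min + trans_amount;
        uint16_t wm0_sel_res_b = plane_res_b - 1;
        uint16_t res_blocks = wm0_sel_res_b + trans_offset_b;

        printf("transition wm: %u blocks\n", res_blocks); /* 41 + 24 = 65 */
        return 0;
    }

As the comment notes, the later ceiling of the sum absorbs the off-by-one, which is why the approximation is considered safe on current platforms.
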
@@ -4907,16 +5001,101 @@ exit: | |||
4907 | trans_wm->plane_en = false; | 5001 | trans_wm->plane_en = false; |
4908 | } | 5002 | } |
4909 | 5003 | ||
5004 | static int __skl_build_plane_wm_single(struct skl_ddb_allocation *ddb, | ||
5005 | struct skl_pipe_wm *pipe_wm, | ||
5006 | enum plane_id plane_id, | ||
5007 | const struct intel_crtc_state *cstate, | ||
5008 | const struct intel_plane_state *pstate, | ||
5009 | int color_plane) | ||
5010 | { | ||
5011 | struct drm_i915_private *dev_priv = to_i915(pstate->base.plane->dev); | ||
5012 | struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; | ||
5013 | enum pipe pipe = to_intel_plane(pstate->base.plane)->pipe; | ||
5014 | struct skl_wm_params wm_params; | ||
5015 | uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]); | ||
5016 | int ret; | ||
5017 | |||
5018 | ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate, | ||
5019 | &wm_params, color_plane); | ||
5020 | if (ret) | ||
5021 | return ret; | ||
5022 | |||
5023 | ret = skl_compute_wm_levels(dev_priv, ddb, cstate, pstate, | ||
5024 | ddb_blocks, &wm_params, wm, wm->wm); | ||
5025 | |||
5026 | if (ret) | ||
5027 | return ret; | ||
5028 | |||
5029 | skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0], | ||
5030 | ddb_blocks, &wm->trans_wm); | ||
5031 | |||
5032 | return 0; | ||
5033 | } | ||
5034 | |||
5035 | static int skl_build_plane_wm_single(struct skl_ddb_allocation *ddb, | ||
5036 | struct skl_pipe_wm *pipe_wm, | ||
5037 | const struct intel_crtc_state *cstate, | ||
5038 | const struct intel_plane_state *pstate) | ||
5039 | { | ||
5040 | enum plane_id plane_id = to_intel_plane(pstate->base.plane)->id; | ||
5041 | |||
5042 | return __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0); | ||
5043 | } | ||
5044 | |||
5045 | static int skl_build_plane_wm_planar(struct skl_ddb_allocation *ddb, | ||
5046 | struct skl_pipe_wm *pipe_wm, | ||
5047 | const struct intel_crtc_state *cstate, | ||
5048 | const struct intel_plane_state *pstate) | ||
5049 | { | ||
5050 | struct intel_plane *plane = to_intel_plane(pstate->base.plane); | ||
5051 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | ||
5052 | enum plane_id plane_id = plane->id; | ||
5053 | struct skl_plane_wm *wm = &pipe_wm->planes[plane_id]; | ||
5054 | struct skl_wm_params wm_params; | ||
5055 | enum pipe pipe = plane->pipe; | ||
5056 | uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]); | ||
5057 | int ret; | ||
5058 | |||
5059 | ret = __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0); | ||
5060 | if (ret) | ||
5061 | return ret; | ||
5062 | |||
5063 | /* uv plane watermarks must also be validated for NV12/Planar */ | ||
5064 | ddb_blocks = skl_ddb_entry_size(&ddb->uv_plane[pipe][plane_id]); | ||
5065 | |||
5066 | ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate, &wm_params, 1); | ||
5067 | if (ret) | ||
5068 | return ret; | ||
5069 | |||
5070 | return skl_compute_wm_levels(dev_priv, ddb, cstate, pstate, | ||
5071 | ddb_blocks, &wm_params, wm, wm->uv_wm); | ||
5072 | } | ||
5073 | |||
5074 | static int icl_build_plane_wm_planar(struct skl_ddb_allocation *ddb, | ||
5075 | struct skl_pipe_wm *pipe_wm, | ||
5076 | const struct intel_crtc_state *cstate, | ||
5077 | const struct intel_plane_state *pstate) | ||
5078 | { | ||
5079 | int ret; | ||
5080 | enum plane_id y_plane_id = pstate->linked_plane->id; | ||
5081 | enum plane_id uv_plane_id = to_intel_plane(pstate->base.plane)->id; | ||
5082 | |||
5083 | ret = __skl_build_plane_wm_single(ddb, pipe_wm, y_plane_id, | ||
5084 | cstate, pstate, 0); | ||
5085 | if (ret) | ||
5086 | return ret; | ||
5087 | |||
5088 | return __skl_build_plane_wm_single(ddb, pipe_wm, uv_plane_id, | ||
5089 | cstate, pstate, 1); | ||
5090 | } | ||
5091 | |||
4910 | static int skl_build_pipe_wm(struct intel_crtc_state *cstate, | 5092 | static int skl_build_pipe_wm(struct intel_crtc_state *cstate, |
4911 | struct skl_ddb_allocation *ddb, | 5093 | struct skl_ddb_allocation *ddb, |
4912 | struct skl_pipe_wm *pipe_wm) | 5094 | struct skl_pipe_wm *pipe_wm) |
4913 | { | 5095 | { |
4914 | struct drm_device *dev = cstate->base.crtc->dev; | ||
4915 | struct drm_crtc_state *crtc_state = &cstate->base; | 5096 | struct drm_crtc_state *crtc_state = &cstate->base; |
4916 | const struct drm_i915_private *dev_priv = to_i915(dev); | ||
4917 | struct drm_plane *plane; | 5097 | struct drm_plane *plane; |
4918 | const struct drm_plane_state *pstate; | 5098 | const struct drm_plane_state *pstate; |
4919 | struct skl_plane_wm *wm; | ||
4920 | int ret; | 5099 | int ret; |
4921 | 5100 | ||
4922 | /* | 5101 | /* |
@@ -4928,44 +5107,21 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate, | |||
4928 | drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { | 5107 | drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { |
4929 | const struct intel_plane_state *intel_pstate = | 5108 | const struct intel_plane_state *intel_pstate = |
4930 | to_intel_plane_state(pstate); | 5109 | to_intel_plane_state(pstate); |
4931 | enum plane_id plane_id = to_intel_plane(plane)->id; | ||
4932 | struct skl_wm_params wm_params; | ||
4933 | enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe; | ||
4934 | uint16_t ddb_blocks; | ||
4935 | 5110 | ||
4936 | wm = &pipe_wm->planes[plane_id]; | 5111 | /* Watermarks are calculated on the master plane */ |
4937 | ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]); | 5112 | if (intel_pstate->slave) |
5113 | continue; | ||
4938 | 5114 | ||
4939 | ret = skl_compute_plane_wm_params(dev_priv, cstate, | 5115 | if (intel_pstate->linked_plane) |
4940 | intel_pstate, &wm_params, 0); | 5116 | ret = icl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate); |
4941 | if (ret) | 5117 | else if (intel_pstate->base.fb && |
4942 | return ret; | 5118 | intel_pstate->base.fb->format->format == DRM_FORMAT_NV12) |
5119 | ret = skl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate); | ||
5120 | else | ||
5121 | ret = skl_build_plane_wm_single(ddb, pipe_wm, cstate, intel_pstate); | ||
4943 | 5122 | ||
4944 | ret = skl_compute_wm_levels(dev_priv, ddb, cstate, | ||
4945 | intel_pstate, &wm_params, wm, 0); | ||
4946 | if (ret) | 5123 | if (ret) |
4947 | return ret; | 5124 | return ret; |
4948 | |||
4949 | skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0], | ||
4950 | ddb_blocks, &wm->trans_wm); | ||
4951 | |||
4952 | /* uv plane watermarks must also be validated for NV12/Planar */ | ||
4953 | if (wm_params.is_planar) { | ||
4954 | memset(&wm_params, 0, sizeof(struct skl_wm_params)); | ||
4955 | wm->is_planar = true; | ||
4956 | |||
4957 | ret = skl_compute_plane_wm_params(dev_priv, cstate, | ||
4958 | intel_pstate, | ||
4959 | &wm_params, 1); | ||
4960 | if (ret) | ||
4961 | return ret; | ||
4962 | |||
4963 | ret = skl_compute_wm_levels(dev_priv, ddb, cstate, | ||
4964 | intel_pstate, &wm_params, | ||
4965 | wm, 1); | ||
4966 | if (ret) | ||
4967 | return ret; | ||
4968 | } | ||
4969 | } | 5125 | } |
4970 | 5126 | ||
4971 | pipe_wm->linetime = skl_compute_linetime_wm(cstate); | 5127 | pipe_wm->linetime = skl_compute_linetime_wm(cstate); |
@@ -5016,14 +5172,7 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc, | |||
5016 | skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id), | 5172 | skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id), |
5017 | &wm->trans_wm); | 5173 | &wm->trans_wm); |
5018 | 5174 | ||
5019 | skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), | 5175 | if (wm->is_planar && INTEL_GEN(dev_priv) < 11) { |
5020 | &ddb->plane[pipe][plane_id]); | ||
5021 | /* FIXME: add proper NV12 support for ICL. */ | ||
5022 | if (INTEL_GEN(dev_priv) >= 11) | ||
5023 | return skl_ddb_entry_write(dev_priv, | ||
5024 | PLANE_BUF_CFG(pipe, plane_id), | ||
5025 | &ddb->plane[pipe][plane_id]); | ||
5026 | if (wm->is_planar) { | ||
5027 | skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), | 5176 | skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), |
5028 | &ddb->uv_plane[pipe][plane_id]); | 5177 | &ddb->uv_plane[pipe][plane_id]); |
5029 | skl_ddb_entry_write(dev_priv, | 5178 | skl_ddb_entry_write(dev_priv, |
@@ -5032,7 +5181,8 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc, | |||
5032 | } else { | 5181 | } else { |
5033 | skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), | 5182 | skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id), |
5034 | &ddb->plane[pipe][plane_id]); | 5183 | &ddb->plane[pipe][plane_id]); |
5035 | I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0); | 5184 | if (INTEL_GEN(dev_priv) < 11) |
5185 | I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0); | ||
5036 | } | 5186 | } |
5037 | } | 5187 | } |
5038 | 5188 | ||
@@ -5076,16 +5226,15 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a, | |||
5076 | return a->start < b->end && b->start < a->end; | 5226 | return a->start < b->end && b->start < a->end; |
5077 | } | 5227 | } |
5078 | 5228 | ||
5079 | bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv, | 5229 | bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, |
5080 | const struct skl_ddb_entry **entries, | 5230 | const struct skl_ddb_entry entries[], |
5081 | const struct skl_ddb_entry *ddb, | 5231 | int num_entries, int ignore_idx) |
5082 | int ignore) | ||
5083 | { | 5232 | { |
5084 | enum pipe pipe; | 5233 | int i; |
5085 | 5234 | ||
5086 | for_each_pipe(dev_priv, pipe) { | 5235 | for (i = 0; i < num_entries; i++) { |
5087 | if (pipe != ignore && entries[pipe] && | 5236 | if (i != ignore_idx && |
5088 | skl_ddb_entries_overlap(ddb, entries[pipe])) | 5237 | skl_ddb_entries_overlap(ddb, &entries[i])) |
5089 | return true; | 5238 | return true; |
5090 | } | 5239 | } |
5091 | 5240 | ||
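
skl_ddb_entries_overlap(), whose return is visible at the top of this hunk, treats each entry as a half-open [start, end) range, so two entries that merely touch do not count as overlapping. A self-contained sketch of the predicate, with a hypothetical entry layout mirroring struct skl_ddb_entry:

    /*
     * Sketch of the half-open interval test behind
     * skl_ddb_entries_overlap(); the entry layout is hypothetical but
     * mirrors struct skl_ddb_entry's start/end pair.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct ddb_entry { int start, end; }; /* [start, end) */

    static bool overlap(const struct ddb_entry *a, const struct ddb_entry *b)
    {
        return a->start < b->end && b->start < a->end;
    }

    int main(void)
    {
        struct ddb_entry a = { 0, 128 }, b = { 128, 256 }, c = { 100, 200 };

        printf("%d\n", overlap(&a, &b)); /* 0: touching, not overlapping */
        printf("%d\n", overlap(&a, &c)); /* 1: [100,128) is shared */
        return 0;
    }
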
@@ -5137,11 +5286,12 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate) | |||
5137 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); | 5286 | struct intel_atomic_state *intel_state = to_intel_atomic_state(state); |
5138 | struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb; | 5287 | struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb; |
5139 | struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; | 5288 | struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; |
5140 | struct drm_plane_state *plane_state; | ||
5141 | struct drm_plane *plane; | 5289 | struct drm_plane *plane; |
5142 | enum pipe pipe = intel_crtc->pipe; | 5290 | enum pipe pipe = intel_crtc->pipe; |
5143 | 5291 | ||
5144 | drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) { | 5292 | drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) { |
5293 | struct drm_plane_state *plane_state; | ||
5294 | struct intel_plane *linked; | ||
5145 | enum plane_id plane_id = to_intel_plane(plane)->id; | 5295 | enum plane_id plane_id = to_intel_plane(plane)->id; |
5146 | 5296 | ||
5147 | if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id], | 5297 | if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id], |
@@ -5153,6 +5303,15 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate) | |||
5153 | plane_state = drm_atomic_get_plane_state(state, plane); | 5303 | plane_state = drm_atomic_get_plane_state(state, plane); |
5154 | if (IS_ERR(plane_state)) | 5304 | if (IS_ERR(plane_state)) |
5155 | return PTR_ERR(plane_state); | 5305 | return PTR_ERR(plane_state); |
5306 | |||
5307 | /* Make sure linked plane is updated too */ | ||
5308 | linked = to_intel_plane_state(plane_state)->linked_plane; | ||
5309 | if (!linked) | ||
5310 | continue; | ||
5311 | |||
5312 | plane_state = drm_atomic_get_plane_state(state, &linked->base); | ||
5313 | if (IS_ERR(plane_state)) | ||
5314 | return PTR_ERR(plane_state); | ||
5156 | } | 5315 | } |
5157 | 5316 | ||
5158 | return 0; | 5317 | return 0; |
@@ -5211,11 +5370,11 @@ skl_print_wm_changes(const struct drm_atomic_state *state) | |||
5211 | if (skl_ddb_entry_equal(old, new)) | 5370 | if (skl_ddb_entry_equal(old, new)) |
5212 | continue; | 5371 | continue; |
5213 | 5372 | ||
5214 | DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n", | 5373 | DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n", |
5215 | intel_plane->base.base.id, | 5374 | intel_plane->base.base.id, |
5216 | intel_plane->base.name, | 5375 | intel_plane->base.name, |
5217 | old->start, old->end, | 5376 | old->start, old->end, |
5218 | new->start, new->end); | 5377 | new->start, new->end); |
5219 | } | 5378 | } |
5220 | } | 5379 | } |
5221 | } | 5380 | } |
@@ -6117,14 +6276,8 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv) | |||
6117 | { | 6276 | { |
6118 | u32 val; | 6277 | u32 val; |
6119 | 6278 | ||
6120 | /* Display WA #0477 WaDisableIPC: skl */ | 6279 | if (!HAS_IPC(dev_priv)) |
6121 | if (IS_SKYLAKE(dev_priv)) | 6280 | return; |
6122 | dev_priv->ipc_enabled = false; | ||
6123 | |||
6124 | /* Display WA #1141: SKL:all KBL:all CFL */ | ||
6125 | if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) && | ||
6126 | !dev_priv->dram_info.symmetric_memory) | ||
6127 | dev_priv->ipc_enabled = false; | ||
6128 | 6281 | ||
6129 | val = I915_READ(DISP_ARB_CTL2); | 6282 | val = I915_READ(DISP_ARB_CTL2); |
6130 | 6283 | ||
@@ -6138,11 +6291,15 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv) | |||
6138 | 6291 | ||
6139 | void intel_init_ipc(struct drm_i915_private *dev_priv) | 6292 | void intel_init_ipc(struct drm_i915_private *dev_priv) |
6140 | { | 6293 | { |
6141 | dev_priv->ipc_enabled = false; | ||
6142 | if (!HAS_IPC(dev_priv)) | 6294 | if (!HAS_IPC(dev_priv)) |
6143 | return; | 6295 | return; |
6144 | 6296 | ||
6145 | dev_priv->ipc_enabled = true; | 6297 | /* Display WA #1141: SKL:all KBL:all CFL */ |
6298 | if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) | ||
6299 | dev_priv->ipc_enabled = dev_priv->dram_info.symmetric_memory; | ||
6300 | else | ||
6301 | dev_priv->ipc_enabled = true; | ||
6302 | |||
6146 | intel_enable_ipc(dev_priv); | 6303 | intel_enable_ipc(dev_priv); |
6147 | } | 6304 | } |
6148 | 6305 | ||
@@ -8736,6 +8893,10 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) | |||
8736 | /* This is not a Wa. Enable to reduce Sampler power */ | 8893 | /* This is not a Wa. Enable to reduce Sampler power */ |
8737 | I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN, | 8894 | I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN, |
8738 | I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE); | 8895 | I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE); |
8896 | |||
8897 | /* WaEnable32PlaneMode:icl */ | ||
8898 | I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, | ||
8899 | _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE)); | ||
8739 | } | 8900 | } |
8740 | 8901 | ||
8741 | static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) | 8902 | static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) |
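
WaEnable32PlaneMode above relies on the masked-register convention: the upper 16 bits of the write select which of the lower 16 bits take effect, so _MASKED_BIT_ENABLE() lets a single write flip one bit with no read-modify-write. A sketch of the macro shape (the chicken bit chosen here is hypothetical):

    /*
     * Sketch of the masked-bit convention behind _MASKED_BIT_ENABLE():
     * on these registers the upper 16 bits select which of the lower 16
     * bits the write touches, so one write can set a single bit without
     * a read-modify-write. The macro shape follows i915's definition;
     * the chicken bit chosen is hypothetical.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
    #define MASKED_BIT_DISABLE(a) ((a) << 16)

    int main(void)
    {
        uint32_t bit = 1u << 7; /* hypothetical chicken-register bit */

        printf("enable:  0x%08x\n", MASKED_BIT_ENABLE(bit));  /* 0x00800080 */
        printf("disable: 0x%08x\n", MASKED_BIT_DISABLE(bit)); /* 0x00800000 */
        return 0;
    }
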
@@ -9313,8 +9474,6 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) | |||
9313 | /* Set up chip specific power management-related functions */ | 9474 | /* Set up chip specific power management-related functions */ |
9314 | void intel_init_pm(struct drm_i915_private *dev_priv) | 9475 | void intel_init_pm(struct drm_i915_private *dev_priv) |
9315 | { | 9476 | { |
9316 | intel_fbc_init(dev_priv); | ||
9317 | |||
9318 | /* For cxsr */ | 9477 | /* For cxsr */ |
9319 | if (IS_PINEVIEW(dev_priv)) | 9478 | if (IS_PINEVIEW(dev_priv)) |
9320 | i915_pineview_get_mem_freq(dev_priv); | 9479 | i915_pineview_get_mem_freq(dev_priv); |
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index b6838b525502..54fa17a5596a 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c | |||
@@ -71,6 +71,10 @@ static bool psr_global_enabled(u32 debug) | |||
71 | static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, | 71 | static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, |
72 | const struct intel_crtc_state *crtc_state) | 72 | const struct intel_crtc_state *crtc_state) |
73 | { | 73 | { |
74 | /* Disable PSR2 by default for all platforms */ | ||
75 | if (i915_modparams.enable_psr == -1) | ||
76 | return false; | ||
77 | |||
74 | switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) { | 78 | switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) { |
75 | case I915_PSR_DEBUG_FORCE_PSR1: | 79 | case I915_PSR_DEBUG_FORCE_PSR1: |
76 | return false; | 80 | return false; |
@@ -79,25 +83,42 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, | |||
79 | } | 83 | } |
80 | } | 84 | } |
81 | 85 | ||
86 | static int edp_psr_shift(enum transcoder cpu_transcoder) | ||
87 | { | ||
88 | switch (cpu_transcoder) { | ||
89 | case TRANSCODER_A: | ||
90 | return EDP_PSR_TRANSCODER_A_SHIFT; | ||
91 | case TRANSCODER_B: | ||
92 | return EDP_PSR_TRANSCODER_B_SHIFT; | ||
93 | case TRANSCODER_C: | ||
94 | return EDP_PSR_TRANSCODER_C_SHIFT; | ||
95 | default: | ||
96 | MISSING_CASE(cpu_transcoder); | ||
97 | /* fallthrough */ | ||
98 | case TRANSCODER_EDP: | ||
99 | return EDP_PSR_TRANSCODER_EDP_SHIFT; | ||
100 | } | ||
101 | } | ||
102 | |||
82 | void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug) | 103 | void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug) |
83 | { | 104 | { |
84 | u32 debug_mask, mask; | 105 | u32 debug_mask, mask; |
106 | enum transcoder cpu_transcoder; | ||
107 | u32 transcoders = BIT(TRANSCODER_EDP); | ||
108 | |||
109 | if (INTEL_GEN(dev_priv) >= 8) | ||
110 | transcoders |= BIT(TRANSCODER_A) | | ||
111 | BIT(TRANSCODER_B) | | ||
112 | BIT(TRANSCODER_C); | ||
85 | 113 | ||
86 | mask = EDP_PSR_ERROR(TRANSCODER_EDP); | 114 | debug_mask = 0; |
87 | debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) | | 115 | mask = 0; |
88 | EDP_PSR_PRE_ENTRY(TRANSCODER_EDP); | 116 | for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { |
89 | 117 | int shift = edp_psr_shift(cpu_transcoder); | |
90 | if (INTEL_GEN(dev_priv) >= 8) { | 118 | |
91 | mask |= EDP_PSR_ERROR(TRANSCODER_A) | | 119 | mask |= EDP_PSR_ERROR(shift); |
92 | EDP_PSR_ERROR(TRANSCODER_B) | | 120 | debug_mask |= EDP_PSR_POST_EXIT(shift) | |
93 | EDP_PSR_ERROR(TRANSCODER_C); | 121 | EDP_PSR_PRE_ENTRY(shift); |
94 | |||
95 | debug_mask |= EDP_PSR_POST_EXIT(TRANSCODER_A) | | ||
96 | EDP_PSR_PRE_ENTRY(TRANSCODER_A) | | ||
97 | EDP_PSR_POST_EXIT(TRANSCODER_B) | | ||
98 | EDP_PSR_PRE_ENTRY(TRANSCODER_B) | | ||
99 | EDP_PSR_POST_EXIT(TRANSCODER_C) | | ||
100 | EDP_PSR_PRE_ENTRY(TRANSCODER_C); | ||
101 | } | 122 | } |
102 | 123 | ||
103 | if (debug & I915_PSR_DEBUG_IRQ) | 124 | if (debug & I915_PSR_DEBUG_IRQ) |
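
The loop above folds the per-transcoder PSR bits into the two masks by deriving a shift per transcoder instead of open-coding every field. A standalone sketch of that pattern; the shift values and the 3-bit field layout are hypothetical placeholders:

    /*
     * Standalone sketch of the per-transcoder mask building above. The
     * shift values and the 3-bit field layout are hypothetical
     * placeholders; the point is that each transcoder owns a small
     * field (pre-entry, post-exit, error) at its own shift, so one loop
     * builds both masks.
     */
    #include <stdint.h>
    #include <stdio.h>

    enum transcoder { TRANS_A, TRANS_B, TRANS_C, TRANS_EDP, TRANS_NUM };

    /* hypothetical per-transcoder shifts */
    static const int psr_shift[TRANS_NUM] = { 8, 16, 24, 0 };

    #define PSR_PRE_ENTRY(shift) (1u << (shift))
    #define PSR_POST_EXIT(shift) (1u << ((shift) + 1))
    #define PSR_ERROR(shift)     (1u << ((shift) + 2))

    int main(void)
    {
        uint32_t mask = 0, debug_mask = 0;
        int t;

        for (t = TRANS_A; t < TRANS_NUM; t++) {
            int shift = psr_shift[t];

            mask |= PSR_ERROR(shift);
            debug_mask |= PSR_POST_EXIT(shift) | PSR_PRE_ENTRY(shift);
        }

        printf("error mask: 0x%08x, debug mask: 0x%08x\n", mask, debug_mask);
        return 0;
    }
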
@@ -155,18 +176,20 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) | |||
155 | BIT(TRANSCODER_C); | 176 | BIT(TRANSCODER_C); |
156 | 177 | ||
157 | for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { | 178 | for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { |
179 | int shift = edp_psr_shift(cpu_transcoder); | ||
180 | |||
158 | /* FIXME: Exit PSR and link train manually when this happens. */ | 181 | /* FIXME: Exit PSR and link train manually when this happens. */ |
159 | if (psr_iir & EDP_PSR_ERROR(cpu_transcoder)) | 182 | if (psr_iir & EDP_PSR_ERROR(shift)) |
160 | DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n", | 183 | DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n", |
161 | transcoder_name(cpu_transcoder)); | 184 | transcoder_name(cpu_transcoder)); |
162 | 185 | ||
163 | if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) { | 186 | if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) { |
164 | dev_priv->psr.last_entry_attempt = time_ns; | 187 | dev_priv->psr.last_entry_attempt = time_ns; |
165 | DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n", | 188 | DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n", |
166 | transcoder_name(cpu_transcoder)); | 189 | transcoder_name(cpu_transcoder)); |
167 | } | 190 | } |
168 | 191 | ||
169 | if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) { | 192 | if (psr_iir & EDP_PSR_POST_EXIT(shift)) { |
170 | dev_priv->psr.last_exit = time_ns; | 193 | dev_priv->psr.last_exit = time_ns; |
171 | DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n", | 194 | DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n", |
172 | transcoder_name(cpu_transcoder)); | 195 | transcoder_name(cpu_transcoder)); |
@@ -294,7 +317,8 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp, | |||
294 | psr_vsc.sdp_header.HB3 = 0x8; | 317 | psr_vsc.sdp_header.HB3 = 0x8; |
295 | } | 318 | } |
296 | 319 | ||
297 | intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state, | 320 | intel_dig_port->write_infoframe(&intel_dig_port->base, |
321 | crtc_state, | ||
298 | DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc)); | 322 | DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc)); |
299 | } | 323 | } |
300 | 324 | ||
@@ -553,11 +577,31 @@ static void intel_psr_activate(struct intel_dp *intel_dp) | |||
553 | dev_priv->psr.active = true; | 577 | dev_priv->psr.active = true; |
554 | } | 578 | } |
555 | 579 | ||
580 | static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv, | ||
581 | enum transcoder cpu_transcoder) | ||
582 | { | ||
583 | static const i915_reg_t regs[] = { | ||
584 | [TRANSCODER_A] = CHICKEN_TRANS_A, | ||
585 | [TRANSCODER_B] = CHICKEN_TRANS_B, | ||
586 | [TRANSCODER_C] = CHICKEN_TRANS_C, | ||
587 | [TRANSCODER_EDP] = CHICKEN_TRANS_EDP, | ||
588 | }; | ||
589 | |||
590 | WARN_ON(INTEL_GEN(dev_priv) < 9); | ||
591 | |||
592 | if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) || | ||
593 | !regs[cpu_transcoder].reg)) | ||
594 | cpu_transcoder = TRANSCODER_A; | ||
595 | |||
596 | return regs[cpu_transcoder]; | ||
597 | } | ||
598 | |||
556 | static void intel_psr_enable_source(struct intel_dp *intel_dp, | 599 | static void intel_psr_enable_source(struct intel_dp *intel_dp, |
557 | const struct intel_crtc_state *crtc_state) | 600 | const struct intel_crtc_state *crtc_state) |
558 | { | 601 | { |
559 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | 602 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
560 | enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; | 603 | enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; |
604 | u32 mask; | ||
561 | 605 | ||
562 | /* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+ | 606 | /* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+ |
563 | * use hardcoded values for PSR AUX transactions | 607 | * use hardcoded values for PSR AUX transactions |
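
gen9_chicken_trans_reg() earlier in this hunk replaces CHICKEN_TRANS() arithmetic with an explicit table plus a warn-and-fallback, which catches both out-of-range transcoders and unpopulated slots. A sketch of that defensive lookup with hypothetical register offsets:

    /*
     * Sketch of the defensive lookup in gen9_chicken_trans_reg(): an
     * index-keyed table with a warn-and-fallback for out-of-range or
     * unpopulated slots. The register offsets are hypothetical.
     */
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    enum transcoder { TRANS_A, TRANS_B, TRANS_C, TRANS_EDP };

    int main(void)
    {
        static const unsigned int regs[] = {
            [TRANS_A]   = 0x420c0, /* hypothetical offsets */
            [TRANS_B]   = 0x420c4,
            [TRANS_C]   = 0x420c8,
            [TRANS_EDP] = 0x420cc,
        };
        unsigned int t = 7; /* bogus transcoder index from elsewhere */

        if (t >= ARRAY_SIZE(regs) || !regs[t]) {
            fprintf(stderr, "WARN: bad transcoder %u, using A\n", t);
            t = TRANS_A;
        }
        printf("chicken reg: 0x%x\n", regs[t]);
        return 0;
    }
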
@@ -566,37 +610,34 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, | |||
566 | hsw_psr_setup_aux(intel_dp); | 610 | hsw_psr_setup_aux(intel_dp); |
567 | 611 | ||
568 | if (dev_priv->psr.psr2_enabled) { | 612 | if (dev_priv->psr.psr2_enabled) { |
569 | u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder)); | 613 | i915_reg_t reg = gen9_chicken_trans_reg(dev_priv, |
614 | cpu_transcoder); | ||
615 | u32 chicken = I915_READ(reg); | ||
570 | 616 | ||
571 | if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) | 617 | if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) |
572 | chicken |= (PSR2_VSC_ENABLE_PROG_HEADER | 618 | chicken |= (PSR2_VSC_ENABLE_PROG_HEADER |
573 | | PSR2_ADD_VERTICAL_LINE_COUNT); | 619 | | PSR2_ADD_VERTICAL_LINE_COUNT); |
574 | 620 | ||
575 | else | 621 | else |
576 | chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL; | 622 | chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL; |
577 | I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken); | 623 | I915_WRITE(reg, chicken); |
578 | |||
579 | I915_WRITE(EDP_PSR_DEBUG, | ||
580 | EDP_PSR_DEBUG_MASK_MEMUP | | ||
581 | EDP_PSR_DEBUG_MASK_HPD | | ||
582 | EDP_PSR_DEBUG_MASK_LPSP | | ||
583 | EDP_PSR_DEBUG_MASK_MAX_SLEEP | | ||
584 | EDP_PSR_DEBUG_MASK_DISP_REG_WRITE); | ||
585 | } else { | ||
586 | /* | ||
587 | * Per Spec: Avoid continuous PSR exit by masking MEMUP | ||
588 | * and HPD. also mask LPSP to avoid dependency on other | ||
589 | * drivers that might block runtime_pm besides | ||
590 | * preventing other hw tracking issues now we can rely | ||
591 | * on frontbuffer tracking. | ||
592 | */ | ||
593 | I915_WRITE(EDP_PSR_DEBUG, | ||
594 | EDP_PSR_DEBUG_MASK_MEMUP | | ||
595 | EDP_PSR_DEBUG_MASK_HPD | | ||
596 | EDP_PSR_DEBUG_MASK_LPSP | | ||
597 | EDP_PSR_DEBUG_MASK_DISP_REG_WRITE | | ||
598 | EDP_PSR_DEBUG_MASK_MAX_SLEEP); | ||
599 | } | 624 | } |
625 | |||
626 | /* | ||
627 | * Per spec: avoid continuous PSR exit by masking MEMUP and HPD. Also | ||
628 | * mask LPSP to avoid dependency on other drivers that might block | ||
629 | * runtime_pm. Besides preventing other HW tracking issues, we can now | ||
630 | * rely on frontbuffer tracking. | ||
631 | */ | ||
632 | mask = EDP_PSR_DEBUG_MASK_MEMUP | | ||
633 | EDP_PSR_DEBUG_MASK_HPD | | ||
634 | EDP_PSR_DEBUG_MASK_LPSP | | ||
635 | EDP_PSR_DEBUG_MASK_MAX_SLEEP; | ||
636 | |||
637 | if (INTEL_GEN(dev_priv) < 11) | ||
638 | mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; | ||
639 | |||
640 | I915_WRITE(EDP_PSR_DEBUG, mask); | ||
600 | } | 641 | } |
601 | 642 | ||
602 | static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, | 643 | static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, |
@@ -656,49 +697,34 @@ unlock: | |||
656 | mutex_unlock(&dev_priv->psr.lock); | 697 | mutex_unlock(&dev_priv->psr.lock); |
657 | } | 698 | } |
658 | 699 | ||
659 | static void | 700 | static void intel_psr_exit(struct drm_i915_private *dev_priv) |
660 | intel_psr_disable_source(struct intel_dp *intel_dp) | ||
661 | { | 701 | { |
662 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | 702 | u32 val; |
663 | |||
664 | if (dev_priv->psr.active) { | ||
665 | i915_reg_t psr_status; | ||
666 | u32 psr_status_mask; | ||
667 | |||
668 | if (dev_priv->psr.psr2_enabled) { | ||
669 | psr_status = EDP_PSR2_STATUS; | ||
670 | psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; | ||
671 | |||
672 | I915_WRITE(EDP_PSR2_CTL, | ||
673 | I915_READ(EDP_PSR2_CTL) & | ||
674 | ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE)); | ||
675 | |||
676 | } else { | ||
677 | psr_status = EDP_PSR_STATUS; | ||
678 | psr_status_mask = EDP_PSR_STATUS_STATE_MASK; | ||
679 | |||
680 | I915_WRITE(EDP_PSR_CTL, | ||
681 | I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE); | ||
682 | } | ||
683 | 703 | ||
684 | /* Wait till PSR is idle */ | 704 | if (!dev_priv->psr.active) { |
685 | if (intel_wait_for_register(dev_priv, | 705 | if (INTEL_GEN(dev_priv) >= 9) |
686 | psr_status, psr_status_mask, 0, | 706 | WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE); |
687 | 2000)) | 707 | WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); |
688 | DRM_ERROR("Timed out waiting for PSR Idle State\n"); | 708 | return; |
709 | } | ||
689 | 710 | ||
690 | dev_priv->psr.active = false; | 711 | if (dev_priv->psr.psr2_enabled) { |
712 | val = I915_READ(EDP_PSR2_CTL); | ||
713 | WARN_ON(!(val & EDP_PSR2_ENABLE)); | ||
714 | I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE); | ||
691 | } else { | 715 | } else { |
692 | if (dev_priv->psr.psr2_enabled) | 716 | val = I915_READ(EDP_PSR_CTL); |
693 | WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE); | 717 | WARN_ON(!(val & EDP_PSR_ENABLE)); |
694 | else | 718 | I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE); |
695 | WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); | ||
696 | } | 719 | } |
720 | dev_priv->psr.active = false; | ||
697 | } | 721 | } |
698 | 722 | ||
699 | static void intel_psr_disable_locked(struct intel_dp *intel_dp) | 723 | static void intel_psr_disable_locked(struct intel_dp *intel_dp) |
700 | { | 724 | { |
701 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); | 725 | struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); |
726 | i915_reg_t psr_status; | ||
727 | u32 psr_status_mask; | ||
702 | 728 | ||
703 | lockdep_assert_held(&dev_priv->psr.lock); | 729 | lockdep_assert_held(&dev_priv->psr.lock); |
704 | 730 | ||
@@ -707,7 +733,21 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) | |||
707 | 733 | ||
708 | DRM_DEBUG_KMS("Disabling PSR%s\n", | 734 | DRM_DEBUG_KMS("Disabling PSR%s\n", |
709 | dev_priv->psr.psr2_enabled ? "2" : "1"); | 735 | dev_priv->psr.psr2_enabled ? "2" : "1"); |
710 | intel_psr_disable_source(intel_dp); | 736 | |
737 | intel_psr_exit(dev_priv); | ||
738 | |||
739 | if (dev_priv->psr.psr2_enabled) { | ||
740 | psr_status = EDP_PSR2_STATUS; | ||
741 | psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; | ||
742 | } else { | ||
743 | psr_status = EDP_PSR_STATUS; | ||
744 | psr_status_mask = EDP_PSR_STATUS_STATE_MASK; | ||
745 | } | ||
746 | |||
747 | /* Wait till PSR is idle */ | ||
748 | if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0, | ||
749 | 2000)) | ||
750 | DRM_ERROR("Timed out waiting for PSR idle state\n"); | ||
711 | 751 | ||
712 | /* Disable PSR on Sink */ | 752 | /* Disable PSR on Sink */ |
713 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); | 753 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); |
@@ -925,25 +965,6 @@ unlock: | |||
925 | mutex_unlock(&dev_priv->psr.lock); | 965 | mutex_unlock(&dev_priv->psr.lock); |
926 | } | 966 | } |
927 | 967 | ||
928 | static void intel_psr_exit(struct drm_i915_private *dev_priv) | ||
929 | { | ||
930 | u32 val; | ||
931 | |||
932 | if (!dev_priv->psr.active) | ||
933 | return; | ||
934 | |||
935 | if (dev_priv->psr.psr2_enabled) { | ||
936 | val = I915_READ(EDP_PSR2_CTL); | ||
937 | WARN_ON(!(val & EDP_PSR2_ENABLE)); | ||
938 | I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE); | ||
939 | } else { | ||
940 | val = I915_READ(EDP_PSR_CTL); | ||
941 | WARN_ON(!(val & EDP_PSR_ENABLE)); | ||
942 | I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE); | ||
943 | } | ||
944 | dev_priv->psr.active = false; | ||
945 | } | ||
946 | |||
947 | /** | 968 | /** |
948 | * intel_psr_invalidate - Invalidade PSR | 969 | * intel_psr_invalidate - Invalidade PSR |
949 | * @dev_priv: i915 device | 970 | * @dev_priv: i915 device |
@@ -1026,20 +1047,16 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, | |||
1026 | 1047 | ||
1027 | /* By definition flush = invalidate + flush */ | 1048 | /* By definition flush = invalidate + flush */ |
1028 | if (frontbuffer_bits) { | 1049 | if (frontbuffer_bits) { |
1029 | if (dev_priv->psr.psr2_enabled) { | 1050 | /* |
1030 | intel_psr_exit(dev_priv); | 1051 | * Display WA #0884: all |
1031 | } else { | 1052 | * This documented WA for bxt can be safely applied |
1032 | /* | 1053 | * broadly so we can force HW tracking to exit PSR |
1033 | * Display WA #0884: all | 1054 | * instead of disabling and re-enabling. |
1034 | * This documented WA for bxt can be safely applied | 1055 | * Workaround tells us to write 0 to CUR_SURFLIVE_A, |
1035 | * broadly so we can force HW tracking to exit PSR | 1056 | * but it makes more sense to write to the current active |
1036 | * instead of disabling and re-enabling. | 1057 | * pipe. |
1037 | * Workaround tells us to write 0 to CUR_SURFLIVE_A, | 1058 | */ |
1038 | * but it makes more sense to write to the current active | 1059 | I915_WRITE(CURSURFLIVE(pipe), 0); |
1039 | * pipe. | ||
1040 | */ | ||
1041 | I915_WRITE(CURSURFLIVE(pipe), 0); | ||
1042 | } | ||
1043 | } | 1060 | } |
1044 | 1061 | ||
1045 | if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) | 1062 | if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) |
@@ -1065,12 +1082,9 @@ void intel_psr_init(struct drm_i915_private *dev_priv) | |||
1065 | if (!dev_priv->psr.sink_support) | 1082 | if (!dev_priv->psr.sink_support) |
1066 | return; | 1083 | return; |
1067 | 1084 | ||
1068 | if (i915_modparams.enable_psr == -1) { | 1085 | if (i915_modparams.enable_psr == -1) |
1069 | i915_modparams.enable_psr = dev_priv->vbt.psr.enable; | 1086 | if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable) |
1070 | 1087 | i915_modparams.enable_psr = 0; | |
1071 | /* Per platform default: all disabled. */ | ||
1072 | i915_modparams.enable_psr = 0; | ||
1073 | } | ||
1074 | 1088 | ||
1075 | /* Set link_standby x link_off defaults */ | 1089 | /* Set link_standby x link_off defaults */ |
1076 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | 1090 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) |
@@ -1130,8 +1144,6 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp) | |||
1130 | intel_psr_disable_locked(intel_dp); | 1144 | intel_psr_disable_locked(intel_dp); |
1131 | /* clear status register */ | 1145 | /* clear status register */ |
1132 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val); | 1146 | drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val); |
1133 | |||
1134 | /* TODO: handle PSR2 errors */ | ||
1135 | exit: | 1147 | exit: |
1136 | mutex_unlock(&psr->lock); | 1148 | mutex_unlock(&psr->lock); |
1137 | } | 1149 | } |
diff --git a/drivers/gpu/drm/i915/intel_quirks.c b/drivers/gpu/drm/i915/intel_quirks.c new file mode 100644 index 000000000000..ec2b0fc92b8b --- /dev/null +++ b/drivers/gpu/drm/i915/intel_quirks.c | |||
@@ -0,0 +1,169 @@ | |||
1 | // SPDX-License-Identifier: MIT | ||
2 | /* | ||
3 | * Copyright © 2018 Intel Corporation | ||
4 | */ | ||
5 | |||
6 | #include <linux/dmi.h> | ||
7 | |||
8 | #include "intel_drv.h" | ||
9 | |||
10 | /* | ||
11 | * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason | ||
12 | */ | ||
13 | static void quirk_ssc_force_disable(struct drm_i915_private *i915) | ||
14 | { | ||
15 | i915->quirks |= QUIRK_LVDS_SSC_DISABLE; | ||
16 | DRM_INFO("applying lvds SSC disable quirk\n"); | ||
17 | } | ||
18 | |||
19 | /* | ||
20 | * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight | ||
21 | * brightness value | ||
22 | */ | ||
23 | static void quirk_invert_brightness(struct drm_i915_private *i915) | ||
24 | { | ||
25 | i915->quirks |= QUIRK_INVERT_BRIGHTNESS; | ||
26 | DRM_INFO("applying inverted panel brightness quirk\n"); | ||
27 | } | ||
28 | |||
29 | /* Some VBT's incorrectly indicate no backlight is present */ | ||
30 | static void quirk_backlight_present(struct drm_i915_private *i915) | ||
31 | { | ||
32 | i915->quirks |= QUIRK_BACKLIGHT_PRESENT; | ||
33 | DRM_INFO("applying backlight present quirk\n"); | ||
34 | } | ||
35 | |||
36 | /* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms | ||
37 | * which is 300 ms greater than eDP spec T12 min. | ||
38 | */ | ||
39 | static void quirk_increase_t12_delay(struct drm_i915_private *i915) | ||
40 | { | ||
41 | i915->quirks |= QUIRK_INCREASE_T12_DELAY; | ||
42 | DRM_INFO("Applying T12 delay quirk\n"); | ||
43 | } | ||
44 | |||
45 | /* | ||
46 | * GeminiLake NUC HDMI outputs require additional off time; | ||
47 | * this allows the onboard retimer to correctly sync to the signal. | ||
48 | */ | ||
49 | static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915) | ||
50 | { | ||
51 | i915->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME; | ||
52 | DRM_INFO("Applying Increase DDI Disabled quirk\n"); | ||
53 | } | ||
54 | |||
55 | struct intel_quirk { | ||
56 | int device; | ||
57 | int subsystem_vendor; | ||
58 | int subsystem_device; | ||
59 | void (*hook)(struct drm_i915_private *i915); | ||
60 | }; | ||
61 | |||
62 | /* For systems that don't have a meaningful PCI subdevice/subvendor ID */ | ||
63 | struct intel_dmi_quirk { | ||
64 | void (*hook)(struct drm_i915_private *i915); | ||
65 | const struct dmi_system_id (*dmi_id_list)[]; | ||
66 | }; | ||
67 | |||
68 | static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) | ||
69 | { | ||
70 | DRM_INFO("Backlight polarity reversed on %s\n", id->ident); | ||
71 | return 1; | ||
72 | } | ||
73 | |||
74 | static const struct intel_dmi_quirk intel_dmi_quirks[] = { | ||
75 | { | ||
76 | .dmi_id_list = &(const struct dmi_system_id[]) { | ||
77 | { | ||
78 | .callback = intel_dmi_reverse_brightness, | ||
79 | .ident = "NCR Corporation", | ||
80 | .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), | ||
81 | DMI_MATCH(DMI_PRODUCT_NAME, ""), | ||
82 | }, | ||
83 | }, | ||
84 | { } /* terminating entry */ | ||
85 | }, | ||
86 | .hook = quirk_invert_brightness, | ||
87 | }, | ||
88 | }; | ||
89 | |||
90 | static struct intel_quirk intel_quirks[] = { | ||
91 | /* Lenovo U160 cannot use SSC on LVDS */ | ||
92 | { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, | ||
93 | |||
94 | /* Sony Vaio Y cannot use SSC on LVDS */ | ||
95 | { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, | ||
96 | |||
97 | /* Acer Aspire 5734Z must invert backlight brightness */ | ||
98 | { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, | ||
99 | |||
100 | /* Acer/eMachines G725 */ | ||
101 | { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, | ||
102 | |||
103 | /* Acer/eMachines e725 */ | ||
104 | { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, | ||
105 | |||
106 | /* Acer/Packard Bell NCL20 */ | ||
107 | { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, | ||
108 | |||
109 | /* Acer Aspire 4736Z */ | ||
110 | { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, | ||
111 | |||
112 | /* Acer Aspire 5336 */ | ||
113 | { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness }, | ||
114 | |||
115 | /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */ | ||
116 | { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present }, | ||
117 | |||
118 | /* Acer C720 Chromebook (Core i3 4005U) */ | ||
119 | { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present }, | ||
120 | |||
121 | /* Apple Macbook 2,1 (Core 2 T7400) */ | ||
122 | { 0x27a2, 0x8086, 0x7270, quirk_backlight_present }, | ||
123 | |||
124 | /* Apple Macbook 4,1 */ | ||
125 | { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present }, | ||
126 | |||
127 | /* Toshiba CB35 Chromebook (Celeron 2955U) */ | ||
128 | { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, | ||
129 | |||
130 | /* HP Chromebook 14 (Celeron 2955U) */ | ||
131 | { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present }, | ||
132 | |||
133 | /* Dell Chromebook 11 */ | ||
134 | { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present }, | ||
135 | |||
136 | /* Dell Chromebook 11 (2015 version) */ | ||
137 | { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present }, | ||
138 | |||
139 | /* Toshiba Satellite P50-C-18C */ | ||
140 | { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay }, | ||
141 | |||
142 | /* GeminiLake NUC */ | ||
143 | { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time }, | ||
144 | { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time }, | ||
145 | /* ASRock ITX */ | ||
146 | { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, | ||
147 | { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time }, | ||
148 | }; | ||
149 | |||
150 | void intel_init_quirks(struct drm_i915_private *i915) | ||
151 | { | ||
152 | struct pci_dev *d = i915->drm.pdev; | ||
153 | int i; | ||
154 | |||
155 | for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { | ||
156 | struct intel_quirk *q = &intel_quirks[i]; | ||
157 | |||
158 | if (d->device == q->device && | ||
159 | (d->subsystem_vendor == q->subsystem_vendor || | ||
160 | q->subsystem_vendor == PCI_ANY_ID) && | ||
161 | (d->subsystem_device == q->subsystem_device || | ||
162 | q->subsystem_device == PCI_ANY_ID)) | ||
163 | q->hook(i915); | ||
164 | } | ||
165 | for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { | ||
166 | if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) | ||
167 | intel_dmi_quirks[i].hook(i915); | ||
168 | } | ||
169 | } | ||
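
intel_init_quirks() above matches on the device id while letting either subsystem field be the PCI_ANY_ID wildcard. A minimal sketch of the match rule with a hypothetical table entry:

    /*
     * Minimal sketch of the quirk-table match rule above: a quirk fires
     * when the device id matches and each subsystem field either
     * matches or is the PCI_ANY_ID wildcard. The table entry here is
     * hypothetical.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define PCI_ANY_ID (~0)

    struct quirk { int device, sub_vendor, sub_device; };

    static bool quirk_matches(const struct quirk *q,
                              int device, int sub_vendor, int sub_device)
    {
        return device == q->device &&
               (sub_vendor == q->sub_vendor || q->sub_vendor == PCI_ANY_ID) &&
               (sub_device == q->sub_device || q->sub_device == PCI_ANY_ID);
    }

    int main(void)
    {
        /* hypothetical entry covering every subsystem of one device id */
        struct quirk q = { 0x3185, PCI_ANY_ID, PCI_ANY_ID };

        printf("%d\n", quirk_matches(&q, 0x3185, 0x8086, 0x2072)); /* 1 */
        printf("%d\n", quirk_matches(&q, 0x3184, 0x8086, 0x2072)); /* 0 */
        return 0;
    }
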
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index d0ef50bf930a..87eebc13c0d8 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -91,6 +91,7 @@ static int | |||
91 | gen4_render_ring_flush(struct i915_request *rq, u32 mode) | 91 | gen4_render_ring_flush(struct i915_request *rq, u32 mode) |
92 | { | 92 | { |
93 | u32 cmd, *cs; | 93 | u32 cmd, *cs; |
94 | int i; | ||
94 | 95 | ||
95 | /* | 96 | /* |
96 | * read/write caches: | 97 | * read/write caches: |
@@ -127,12 +128,45 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode) | |||
127 | cmd |= MI_INVALIDATE_ISP; | 128 | cmd |= MI_INVALIDATE_ISP; |
128 | } | 129 | } |
129 | 130 | ||
130 | cs = intel_ring_begin(rq, 2); | 131 | i = 2; |
132 | if (mode & EMIT_INVALIDATE) | ||
133 | i += 20; | ||
134 | |||
135 | cs = intel_ring_begin(rq, i); | ||
131 | if (IS_ERR(cs)) | 136 | if (IS_ERR(cs)) |
132 | return PTR_ERR(cs); | 137 | return PTR_ERR(cs); |
133 | 138 | ||
134 | *cs++ = cmd; | 139 | *cs++ = cmd; |
135 | *cs++ = MI_NOOP; | 140 | |
141 | /* | ||
142 | * A random delay to let the CS invalidate take effect? Without this | ||
143 | * delay, the GPU relocation path fails as the CS does not see | ||
144 | * the updated contents. Just as important, if we apply the flushes | ||
145 | * to the EMIT_FLUSH branch (i.e. immediately after the relocation | ||
146 | * write and before the invalidate on the next batch), the relocations | ||
147 | * still fail. This implies that it is a delay following invalidation | ||
148 | * that is required to reset the caches, as opposed to a delay to | ||
149 | * ensure the memory is written. | ||
150 | */ | ||
151 | if (mode & EMIT_INVALIDATE) { | ||
152 | *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; | ||
153 | *cs++ = i915_ggtt_offset(rq->engine->scratch) | | ||
154 | PIPE_CONTROL_GLOBAL_GTT; | ||
155 | *cs++ = 0; | ||
156 | *cs++ = 0; | ||
157 | |||
158 | for (i = 0; i < 12; i++) | ||
159 | *cs++ = MI_FLUSH; | ||
160 | |||
161 | *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE; | ||
162 | *cs++ = i915_ggtt_offset(rq->engine->scratch) | | ||
163 | PIPE_CONTROL_GLOBAL_GTT; | ||
164 | *cs++ = 0; | ||
165 | *cs++ = 0; | ||
166 | } | ||
167 | |||
168 | *cs++ = cmd; | ||
169 | |||
136 | intel_ring_advance(rq, cs); | 170 | intel_ring_advance(rq, cs); |
137 | 171 | ||
138 | return 0; | 172 | return 0; |
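
The ring-space request passed to intel_ring_begin() must match the dwords actually emitted: the base path writes cmd twice (2 dwords), and EMIT_INVALIDATE adds two 4-dword PIPE_CONTROL scratch writes around twelve MI_FLUSHes (20 dwords), totalling 22. A standalone sketch of that sizing, with EMIT_INVALIDATE as a stand-in flag:

#include <assert.h>
#include <stdio.h>

#define EMIT_INVALIDATE (1u << 0) /* stand-in flag for this sketch */

/* Count the dwords gen4_render_ring_flush() emits for a given mode,
 * matching the "i = 2; if (invalidate) i += 20;" sizing above. */
static int flush_dwords(unsigned int mode)
{
	int n = 2;			/* cmd, cmd */

	if (mode & EMIT_INVALIDATE)
		n += 4 + 12 + 4;	/* PIPE_CONTROL + 12x MI_FLUSH + PIPE_CONTROL */

	return n;
}

int main(void)
{
	assert(flush_dwords(0) == 2);
	assert(flush_dwords(EMIT_INVALIDATE) == 22);
	printf("sizes ok\n");
	return 0;
}
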
@@ -574,7 +608,9 @@ static void skip_request(struct i915_request *rq) | |||
574 | 608 | ||
575 | static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq) | 609 | static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq) |
576 | { | 610 | { |
577 | GEM_TRACE("%s seqno=%x\n", engine->name, rq ? rq->global_seqno : 0); | 611 | GEM_TRACE("%s request global=%d, current=%d\n", |
612 | engine->name, rq ? rq->global_seqno : 0, | ||
613 | intel_engine_get_seqno(engine)); | ||
578 | 614 | ||
579 | /* | 615 | /* |
580 | * Try to restore the logical GPU state to match the continuation | 616 | * Try to restore the logical GPU state to match the continuation |
@@ -1021,8 +1057,7 @@ i915_emit_bb_start(struct i915_request *rq, | |||
1021 | int intel_ring_pin(struct intel_ring *ring) | 1057 | int intel_ring_pin(struct intel_ring *ring) |
1022 | { | 1058 | { |
1023 | struct i915_vma *vma = ring->vma; | 1059 | struct i915_vma *vma = ring->vma; |
1024 | enum i915_map_type map = | 1060 | enum i915_map_type map = i915_coherent_map_type(vma->vm->i915); |
1025 | HAS_LLC(vma->vm->i915) ? I915_MAP_WB : I915_MAP_WC; | ||
1026 | unsigned int flags; | 1061 | unsigned int flags; |
1027 | void *addr; | 1062 | void *addr; |
1028 | int ret; | 1063 | int ret; |
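
The map-type selection moves behind i915_coherent_map_type(); assuming that helper keeps the old HAS_LLC semantics (write-back when the CPU snoops through a shared LLC, write-combining otherwise), a standalone sketch of the equivalent choice:

#include <stdbool.h>
#include <stdio.h>

enum map_type { MAP_WB, MAP_WC }; /* write-back vs write-combining */

/* Assumed behaviour of i915_coherent_map_type(): a shared LLC makes
 * write-back CPU mappings coherent with the GPU; otherwise fall back
 * to write-combining. */
static enum map_type coherent_map_type(bool has_llc)
{
	return has_llc ? MAP_WB : MAP_WC;
}

int main(void)
{
	printf("LLC:    %s\n", coherent_map_type(true) == MAP_WB ? "WB" : "WC");
	printf("no LLC: %s\n", coherent_map_type(false) == MAP_WB ? "WB" : "WC");
	return 0;
}
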
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 2dfa585712c2..8a2270b209b0 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -1,4 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: MIT */ |
2 | #ifndef _INTEL_RINGBUFFER_H_ | 2 | #ifndef _INTEL_RINGBUFFER_H_ |
3 | #define _INTEL_RINGBUFFER_H_ | 3 | #define _INTEL_RINGBUFFER_H_ |
4 | 4 | ||
@@ -93,11 +93,11 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a) | |||
93 | #define I915_MAX_SUBSLICES 8 | 93 | #define I915_MAX_SUBSLICES 8 |
94 | 94 | ||
95 | #define instdone_slice_mask(dev_priv__) \ | 95 | #define instdone_slice_mask(dev_priv__) \ |
96 | (INTEL_GEN(dev_priv__) == 7 ? \ | 96 | (IS_GEN7(dev_priv__) ? \ |
97 | 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask) | 97 | 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask) |
98 | 98 | ||
99 | #define instdone_subslice_mask(dev_priv__) \ | 99 | #define instdone_subslice_mask(dev_priv__) \ |
100 | (INTEL_GEN(dev_priv__) == 7 ? \ | 100 | (IS_GEN7(dev_priv__) ? \ |
101 | 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0]) | 101 | 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0]) |
102 | 102 | ||
103 | #define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \ | 103 | #define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \ |
@@ -190,11 +190,22 @@ enum intel_engine_id { | |||
190 | }; | 190 | }; |
191 | 191 | ||
192 | struct i915_priolist { | 192 | struct i915_priolist { |
193 | struct list_head requests[I915_PRIORITY_COUNT]; | ||
193 | struct rb_node node; | 194 | struct rb_node node; |
194 | struct list_head requests; | 195 | unsigned long used; |
195 | int priority; | 196 | int priority; |
196 | }; | 197 | }; |
197 | 198 | ||
199 | #define priolist_for_each_request(it, plist, idx) \ | ||
200 | for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \ | ||
201 | list_for_each_entry(it, &(plist)->requests[idx], sched.link) | ||
202 | |||
203 | #define priolist_for_each_request_consume(it, n, plist, idx) \ | ||
204 | for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \ | ||
205 | list_for_each_entry_safe(it, n, \ | ||
206 | &(plist)->requests[idx - 1], \ | ||
207 | sched.link) | ||
208 | |||
198 | struct st_preempt_hang { | 209 | struct st_preempt_hang { |
199 | struct completion completion; | 210 | struct completion completion; |
200 | bool inject_hang; | 211 | bool inject_hang; |
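
The priolist now carries one request list per priority level plus a used bitmask, so the consuming iterator can jump straight to populated levels with ffs() rather than walking empty buckets. A standalone sketch of that pattern (bucket count and contents are arbitrary):

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define NBUCKETS 8

int main(void)
{
	int buckets[NBUCKETS] = { 0 };
	unsigned int used = 0;
	int idx;

	/* Mark levels 1 and 6 as populated, as request submission would. */
	buckets[1] = 10; used |= 1u << 1;
	buckets[6] = 60; used |= 1u << 6;

	/* Consume: ffs() returns the 1-based index of the lowest set bit,
	 * so "idx - 1" is the next non-empty bucket; clear it once drained. */
	while ((idx = ffs(used))) {
		printf("draining bucket %d -> %d\n", idx - 1, buckets[idx - 1]);
		used &= ~(1u << (idx - 1));
	}
	return 0;
}
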
@@ -487,11 +498,10 @@ struct intel_engine_cs { | |||
487 | */ | 498 | */ |
488 | void (*submit_request)(struct i915_request *rq); | 499 | void (*submit_request)(struct i915_request *rq); |
489 | 500 | ||
490 | /* Call when the priority on a request has changed and it and its | 501 | /* |
502 | * Call when the priority on a request has changed and it and its | ||
491 | * dependencies may need rescheduling. Note the request itself may | 503 | * dependencies may need rescheduling. Note the request itself may |
492 | * not be ready to run! | 504 | * not be ready to run! |
493 | * | ||
494 | * Called under the struct_mutex. | ||
495 | */ | 505 | */ |
496 | void (*schedule)(struct i915_request *request, | 506 | void (*schedule)(struct i915_request *request, |
497 | const struct i915_sched_attr *attr); | 507 | const struct i915_sched_attr *attr); |
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 0fdabce647ab..1c2de9b69a19 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
@@ -208,7 +208,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, | |||
208 | 208 | ||
209 | is_enabled = true; | 209 | is_enabled = true; |
210 | 210 | ||
211 | for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) { | 211 | for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) { |
212 | if (power_well->desc->always_on) | 212 | if (power_well->desc->always_on) |
213 | continue; | 213 | continue; |
214 | 214 | ||
@@ -436,6 +436,15 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, | |||
436 | I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX); | 436 | I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX); |
437 | 437 | ||
438 | hsw_wait_for_power_well_enable(dev_priv, power_well); | 438 | hsw_wait_for_power_well_enable(dev_priv, power_well); |
439 | |||
440 | /* Display WA #1178: icl */ | ||
441 | if (IS_ICELAKE(dev_priv) && | ||
442 | pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && | ||
443 | !intel_bios_is_port_edp(dev_priv, port)) { | ||
444 | val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx)); | ||
445 | val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; | ||
446 | I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val); | ||
447 | } | ||
439 | } | 448 | } |
440 | 449 | ||
441 | static void | 450 | static void |
@@ -456,6 +465,25 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, | |||
456 | hsw_wait_for_power_well_disable(dev_priv, power_well); | 465 | hsw_wait_for_power_well_disable(dev_priv, power_well); |
457 | } | 466 | } |
458 | 467 | ||
468 | #define ICL_AUX_PW_TO_CH(pw_idx) \ | ||
469 | ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) | ||
470 | |||
471 | static void | ||
472 | icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, | ||
473 | struct i915_power_well *power_well) | ||
474 | { | ||
475 | enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx); | ||
476 | u32 val; | ||
477 | |||
478 | val = I915_READ(DP_AUX_CH_CTL(aux_ch)); | ||
479 | val &= ~DP_AUX_CH_CTL_TBT_IO; | ||
480 | if (power_well->desc->hsw.is_tc_tbt) | ||
481 | val |= DP_AUX_CH_CTL_TBT_IO; | ||
482 | I915_WRITE(DP_AUX_CH_CTL(aux_ch), val); | ||
483 | |||
484 | hsw_power_well_enable(dev_priv, power_well); | ||
485 | } | ||
486 | |||
459 | /* | 487 | /* |
460 | * We should only use the power well if we explicitly asked the hardware to | 488 | * We should only use the power well if we explicitly asked the hardware to |
461 | * enable it, so check if it's enabled and also check if we've requested it to | 489 | * enable it, so check if it's enabled and also check if we've requested it to |
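
ICL_AUX_PW_TO_CH() recovers the AUX channel from the power-well index by offset arithmetic, relying on both enumerations being contiguous. A standalone sketch with hypothetical enum values (the real ICL_PW_CTL_IDX_AUX_* numbers differ; only contiguity matters for the mapping):

#include <stdio.h>

/* Hypothetical contiguous numbering standing in for the real enums. */
enum pw_idx { PW_IDX_AUX_A = 8, PW_IDX_AUX_B, PW_IDX_AUX_C, PW_IDX_AUX_D };
enum aux_ch { AUX_CH_A, AUX_CH_B, AUX_CH_C, AUX_CH_D };

/* Same shape as ICL_AUX_PW_TO_CH(): subtract the first index, add the
 * first channel, so A maps to A, B to B, and so on. */
#define AUX_PW_TO_CH(idx) ((idx) - PW_IDX_AUX_A + AUX_CH_A)

int main(void)
{
	printf("AUX C well -> channel %c\n", 'A' + AUX_PW_TO_CH(PW_IDX_AUX_C));
	printf("AUX D well -> channel %c\n", 'A' + AUX_PW_TO_CH(PW_IDX_AUX_D));
	return 0;
}
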
@@ -465,11 +493,25 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, | |||
465 | struct i915_power_well *power_well) | 493 | struct i915_power_well *power_well) |
466 | { | 494 | { |
467 | const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; | 495 | const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; |
496 | enum i915_power_well_id id = power_well->desc->id; | ||
468 | int pw_idx = power_well->desc->hsw.idx; | 497 | int pw_idx = power_well->desc->hsw.idx; |
469 | u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | | 498 | u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | |
470 | HSW_PWR_WELL_CTL_STATE(pw_idx); | 499 | HSW_PWR_WELL_CTL_STATE(pw_idx); |
500 | u32 val; | ||
471 | 501 | ||
472 | return (I915_READ(regs->driver) & mask) == mask; | 502 | val = I915_READ(regs->driver); |
503 | |||
504 | /* | ||
505 | * On GEN9 big core due to a DMC bug the driver's request bits for PW1 | ||
506 | * and the MISC_IO PW will be not restored, so check instead for the | ||
507 | * BIOS's own request bits, which are forced-on for these power wells | ||
508 | * when exiting DC5/6. | ||
509 | */ | ||
510 | if (IS_GEN9(dev_priv) && !IS_GEN9_LP(dev_priv) && | ||
511 | (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO)) | ||
512 | val |= I915_READ(regs->bios); | ||
513 | |||
514 | return (val & mask) == mask; | ||
473 | } | 515 | } |
474 | 516 | ||
475 | static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) | 517 | static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) |
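
With the GEN9 DMC bug, the driver's request bit can vanish across DC5/6 even though the well stayed up, so the enabled test now ORs in the BIOS control register before comparing against the request+state mask. A standalone sketch of that check, with a made-up bit layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit layout: bit 1 = request, bit 0 = state. */
#define PWR_WELL_REQ   (1u << 1)
#define PWR_WELL_STATE (1u << 0)

static bool power_well_enabled(uint32_t driver_reg, uint32_t bios_reg,
			       bool dmc_workaround)
{
	uint32_t mask = PWR_WELL_REQ | PWR_WELL_STATE;
	uint32_t val = driver_reg;

	/* GEN9 big-core DMC bug: the driver's request bit is not restored
	 * on DC5/6 exit, but the BIOS's forced-on request bit is. */
	if (dmc_workaround)
		val |= bios_reg;

	return (val & mask) == mask;
}

int main(void)
{
	/* State on, driver request lost, BIOS request present. */
	printf("without workaround: %d\n",
	       power_well_enabled(PWR_WELL_STATE, PWR_WELL_REQ, false)); /* 0 */
	printf("with workaround:    %d\n",
	       power_well_enabled(PWR_WELL_STATE, PWR_WELL_REQ, true));  /* 1 */
	return 0;
}
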
@@ -551,7 +593,9 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) | |||
551 | u32 mask; | 593 | u32 mask; |
552 | 594 | ||
553 | mask = DC_STATE_EN_UPTO_DC5; | 595 | mask = DC_STATE_EN_UPTO_DC5; |
554 | if (IS_GEN9_LP(dev_priv)) | 596 | if (INTEL_GEN(dev_priv) >= 11) |
597 | mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; | ||
598 | else if (IS_GEN9_LP(dev_priv)) | ||
555 | mask |= DC_STATE_EN_DC9; | 599 | mask |= DC_STATE_EN_DC9; |
556 | else | 600 | else |
557 | mask |= DC_STATE_EN_UPTO_DC6; | 601 | mask |= DC_STATE_EN_UPTO_DC6; |
@@ -624,8 +668,13 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv) | |||
624 | assert_can_enable_dc9(dev_priv); | 668 | assert_can_enable_dc9(dev_priv); |
625 | 669 | ||
626 | DRM_DEBUG_KMS("Enabling DC9\n"); | 670 | DRM_DEBUG_KMS("Enabling DC9\n"); |
627 | 671 | /* | |
628 | intel_power_sequencer_reset(dev_priv); | 672 | * Power sequencer reset is not needed on |
673 | * platforms with South Display Engine on PCH, | ||
674 | * because PPS registers are always on. | ||
675 | */ | ||
676 | if (!HAS_PCH_SPLIT(dev_priv)) | ||
677 | intel_power_sequencer_reset(dev_priv); | ||
629 | gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); | 678 | gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); |
630 | } | 679 | } |
631 | 680 | ||
@@ -707,7 +756,7 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) | |||
707 | assert_csr_loaded(dev_priv); | 756 | assert_csr_loaded(dev_priv); |
708 | } | 757 | } |
709 | 758 | ||
710 | static void skl_enable_dc6(struct drm_i915_private *dev_priv) | 759 | void skl_enable_dc6(struct drm_i915_private *dev_priv) |
711 | { | 760 | { |
712 | assert_can_enable_dc6(dev_priv); | 761 | assert_can_enable_dc6(dev_priv); |
713 | 762 | ||
@@ -808,6 +857,14 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, | |||
808 | 857 | ||
809 | if (IS_GEN9_LP(dev_priv)) | 858 | if (IS_GEN9_LP(dev_priv)) |
810 | bxt_verify_ddi_phy_power_wells(dev_priv); | 859 | bxt_verify_ddi_phy_power_wells(dev_priv); |
860 | |||
861 | if (INTEL_GEN(dev_priv) >= 11) | ||
862 | /* | ||
863 | * DMC retains HW context only for port A; the other combo | ||
864 | * PHY's HW context for port B is lost after DC transitions, | ||
865 | * so we need to restore it manually. | ||
866 | */ | ||
867 | icl_combo_phys_init(dev_priv); | ||
811 | } | 868 | } |
812 | 869 | ||
813 | static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, | 870 | static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, |
@@ -1608,7 +1665,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, | |||
1608 | intel_display_power_domain_str(domain)); | 1665 | intel_display_power_domain_str(domain)); |
1609 | power_domains->domain_use_count[domain]--; | 1666 | power_domains->domain_use_count[domain]--; |
1610 | 1667 | ||
1611 | for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) | 1668 | for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) |
1612 | intel_power_well_put(dev_priv, power_well); | 1669 | intel_power_well_put(dev_priv, power_well); |
1613 | 1670 | ||
1614 | mutex_unlock(&power_domains->lock); | 1671 | mutex_unlock(&power_domains->lock); |
@@ -2041,7 +2098,7 @@ static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { | |||
2041 | static const struct i915_power_well_desc i9xx_always_on_power_well[] = { | 2098 | static const struct i915_power_well_desc i9xx_always_on_power_well[] = { |
2042 | { | 2099 | { |
2043 | .name = "always-on", | 2100 | .name = "always-on", |
2044 | .always_on = 1, | 2101 | .always_on = true, |
2045 | .domains = POWER_DOMAIN_MASK, | 2102 | .domains = POWER_DOMAIN_MASK, |
2046 | .ops = &i9xx_always_on_power_well_ops, | 2103 | .ops = &i9xx_always_on_power_well_ops, |
2047 | .id = DISP_PW_ID_NONE, | 2104 | .id = DISP_PW_ID_NONE, |
@@ -2058,7 +2115,7 @@ static const struct i915_power_well_ops i830_pipes_power_well_ops = { | |||
2058 | static const struct i915_power_well_desc i830_power_wells[] = { | 2115 | static const struct i915_power_well_desc i830_power_wells[] = { |
2059 | { | 2116 | { |
2060 | .name = "always-on", | 2117 | .name = "always-on", |
2061 | .always_on = 1, | 2118 | .always_on = true, |
2062 | .domains = POWER_DOMAIN_MASK, | 2119 | .domains = POWER_DOMAIN_MASK, |
2063 | .ops = &i9xx_always_on_power_well_ops, | 2120 | .ops = &i9xx_always_on_power_well_ops, |
2064 | .id = DISP_PW_ID_NONE, | 2121 | .id = DISP_PW_ID_NONE, |
@@ -2102,7 +2159,7 @@ static const struct i915_power_well_regs hsw_power_well_regs = { | |||
2102 | static const struct i915_power_well_desc hsw_power_wells[] = { | 2159 | static const struct i915_power_well_desc hsw_power_wells[] = { |
2103 | { | 2160 | { |
2104 | .name = "always-on", | 2161 | .name = "always-on", |
2105 | .always_on = 1, | 2162 | .always_on = true, |
2106 | .domains = POWER_DOMAIN_MASK, | 2163 | .domains = POWER_DOMAIN_MASK, |
2107 | .ops = &i9xx_always_on_power_well_ops, | 2164 | .ops = &i9xx_always_on_power_well_ops, |
2108 | .id = DISP_PW_ID_NONE, | 2165 | .id = DISP_PW_ID_NONE, |
@@ -2123,7 +2180,7 @@ static const struct i915_power_well_desc hsw_power_wells[] = { | |||
2123 | static const struct i915_power_well_desc bdw_power_wells[] = { | 2180 | static const struct i915_power_well_desc bdw_power_wells[] = { |
2124 | { | 2181 | { |
2125 | .name = "always-on", | 2182 | .name = "always-on", |
2126 | .always_on = 1, | 2183 | .always_on = true, |
2127 | .domains = POWER_DOMAIN_MASK, | 2184 | .domains = POWER_DOMAIN_MASK, |
2128 | .ops = &i9xx_always_on_power_well_ops, | 2185 | .ops = &i9xx_always_on_power_well_ops, |
2129 | .id = DISP_PW_ID_NONE, | 2186 | .id = DISP_PW_ID_NONE, |
@@ -2166,7 +2223,7 @@ static const struct i915_power_well_ops vlv_dpio_power_well_ops = { | |||
2166 | static const struct i915_power_well_desc vlv_power_wells[] = { | 2223 | static const struct i915_power_well_desc vlv_power_wells[] = { |
2167 | { | 2224 | { |
2168 | .name = "always-on", | 2225 | .name = "always-on", |
2169 | .always_on = 1, | 2226 | .always_on = true, |
2170 | .domains = POWER_DOMAIN_MASK, | 2227 | .domains = POWER_DOMAIN_MASK, |
2171 | .ops = &i9xx_always_on_power_well_ops, | 2228 | .ops = &i9xx_always_on_power_well_ops, |
2172 | .id = DISP_PW_ID_NONE, | 2229 | .id = DISP_PW_ID_NONE, |
@@ -2242,7 +2299,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = { | |||
2242 | static const struct i915_power_well_desc chv_power_wells[] = { | 2299 | static const struct i915_power_well_desc chv_power_wells[] = { |
2243 | { | 2300 | { |
2244 | .name = "always-on", | 2301 | .name = "always-on", |
2245 | .always_on = 1, | 2302 | .always_on = true, |
2246 | .domains = POWER_DOMAIN_MASK, | 2303 | .domains = POWER_DOMAIN_MASK, |
2247 | .ops = &i9xx_always_on_power_well_ops, | 2304 | .ops = &i9xx_always_on_power_well_ops, |
2248 | .id = DISP_PW_ID_NONE, | 2305 | .id = DISP_PW_ID_NONE, |
@@ -2293,7 +2350,7 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, | |||
2293 | static const struct i915_power_well_desc skl_power_wells[] = { | 2350 | static const struct i915_power_well_desc skl_power_wells[] = { |
2294 | { | 2351 | { |
2295 | .name = "always-on", | 2352 | .name = "always-on", |
2296 | .always_on = 1, | 2353 | .always_on = true, |
2297 | .domains = POWER_DOMAIN_MASK, | 2354 | .domains = POWER_DOMAIN_MASK, |
2298 | .ops = &i9xx_always_on_power_well_ops, | 2355 | .ops = &i9xx_always_on_power_well_ops, |
2299 | .id = DISP_PW_ID_NONE, | 2356 | .id = DISP_PW_ID_NONE, |
@@ -2301,6 +2358,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { | |||
2301 | { | 2358 | { |
2302 | .name = "power well 1", | 2359 | .name = "power well 1", |
2303 | /* Handled by the DMC firmware */ | 2360 | /* Handled by the DMC firmware */ |
2361 | .always_on = true, | ||
2304 | .domains = 0, | 2362 | .domains = 0, |
2305 | .ops = &hsw_power_well_ops, | 2363 | .ops = &hsw_power_well_ops, |
2306 | .id = SKL_DISP_PW_1, | 2364 | .id = SKL_DISP_PW_1, |
@@ -2313,6 +2371,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { | |||
2313 | { | 2371 | { |
2314 | .name = "MISC IO power well", | 2372 | .name = "MISC IO power well", |
2315 | /* Handled by the DMC firmware */ | 2373 | /* Handled by the DMC firmware */ |
2374 | .always_on = true, | ||
2316 | .domains = 0, | 2375 | .domains = 0, |
2317 | .ops = &hsw_power_well_ops, | 2376 | .ops = &hsw_power_well_ops, |
2318 | .id = SKL_DISP_PW_MISC_IO, | 2377 | .id = SKL_DISP_PW_MISC_IO, |
@@ -2385,13 +2444,15 @@ static const struct i915_power_well_desc skl_power_wells[] = { | |||
2385 | static const struct i915_power_well_desc bxt_power_wells[] = { | 2444 | static const struct i915_power_well_desc bxt_power_wells[] = { |
2386 | { | 2445 | { |
2387 | .name = "always-on", | 2446 | .name = "always-on", |
2388 | .always_on = 1, | 2447 | .always_on = true, |
2389 | .domains = POWER_DOMAIN_MASK, | 2448 | .domains = POWER_DOMAIN_MASK, |
2390 | .ops = &i9xx_always_on_power_well_ops, | 2449 | .ops = &i9xx_always_on_power_well_ops, |
2391 | .id = DISP_PW_ID_NONE, | 2450 | .id = DISP_PW_ID_NONE, |
2392 | }, | 2451 | }, |
2393 | { | 2452 | { |
2394 | .name = "power well 1", | 2453 | .name = "power well 1", |
2454 | /* Handled by the DMC firmware */ | ||
2455 | .always_on = true, | ||
2395 | .domains = 0, | 2456 | .domains = 0, |
2396 | .ops = &hsw_power_well_ops, | 2457 | .ops = &hsw_power_well_ops, |
2397 | .id = SKL_DISP_PW_1, | 2458 | .id = SKL_DISP_PW_1, |
@@ -2443,7 +2504,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = { | |||
2443 | static const struct i915_power_well_desc glk_power_wells[] = { | 2504 | static const struct i915_power_well_desc glk_power_wells[] = { |
2444 | { | 2505 | { |
2445 | .name = "always-on", | 2506 | .name = "always-on", |
2446 | .always_on = 1, | 2507 | .always_on = true, |
2447 | .domains = POWER_DOMAIN_MASK, | 2508 | .domains = POWER_DOMAIN_MASK, |
2448 | .ops = &i9xx_always_on_power_well_ops, | 2509 | .ops = &i9xx_always_on_power_well_ops, |
2449 | .id = DISP_PW_ID_NONE, | 2510 | .id = DISP_PW_ID_NONE, |
@@ -2451,6 +2512,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { | |||
2451 | { | 2512 | { |
2452 | .name = "power well 1", | 2513 | .name = "power well 1", |
2453 | /* Handled by the DMC firmware */ | 2514 | /* Handled by the DMC firmware */ |
2515 | .always_on = true, | ||
2454 | .domains = 0, | 2516 | .domains = 0, |
2455 | .ops = &hsw_power_well_ops, | 2517 | .ops = &hsw_power_well_ops, |
2456 | .id = SKL_DISP_PW_1, | 2518 | .id = SKL_DISP_PW_1, |
@@ -2571,7 +2633,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { | |||
2571 | static const struct i915_power_well_desc cnl_power_wells[] = { | 2633 | static const struct i915_power_well_desc cnl_power_wells[] = { |
2572 | { | 2634 | { |
2573 | .name = "always-on", | 2635 | .name = "always-on", |
2574 | .always_on = 1, | 2636 | .always_on = true, |
2575 | .domains = POWER_DOMAIN_MASK, | 2637 | .domains = POWER_DOMAIN_MASK, |
2576 | .ops = &i9xx_always_on_power_well_ops, | 2638 | .ops = &i9xx_always_on_power_well_ops, |
2577 | .id = DISP_PW_ID_NONE, | 2639 | .id = DISP_PW_ID_NONE, |
@@ -2579,6 +2641,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = { | |||
2579 | { | 2641 | { |
2580 | .name = "power well 1", | 2642 | .name = "power well 1", |
2581 | /* Handled by the DMC firmware */ | 2643 | /* Handled by the DMC firmware */ |
2644 | .always_on = true, | ||
2582 | .domains = 0, | 2645 | .domains = 0, |
2583 | .ops = &hsw_power_well_ops, | 2646 | .ops = &hsw_power_well_ops, |
2584 | .id = SKL_DISP_PW_1, | 2647 | .id = SKL_DISP_PW_1, |
@@ -2716,6 +2779,13 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = { | |||
2716 | .is_enabled = hsw_power_well_enabled, | 2779 | .is_enabled = hsw_power_well_enabled, |
2717 | }; | 2780 | }; |
2718 | 2781 | ||
2782 | static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = { | ||
2783 | .sync_hw = hsw_power_well_sync_hw, | ||
2784 | .enable = icl_tc_phy_aux_power_well_enable, | ||
2785 | .disable = hsw_power_well_disable, | ||
2786 | .is_enabled = hsw_power_well_enabled, | ||
2787 | }; | ||
2788 | |||
2719 | static const struct i915_power_well_regs icl_aux_power_well_regs = { | 2789 | static const struct i915_power_well_regs icl_aux_power_well_regs = { |
2720 | .bios = ICL_PWR_WELL_CTL_AUX1, | 2790 | .bios = ICL_PWR_WELL_CTL_AUX1, |
2721 | .driver = ICL_PWR_WELL_CTL_AUX2, | 2791 | .driver = ICL_PWR_WELL_CTL_AUX2, |
@@ -2731,7 +2801,7 @@ static const struct i915_power_well_regs icl_ddi_power_well_regs = { | |||
2731 | static const struct i915_power_well_desc icl_power_wells[] = { | 2801 | static const struct i915_power_well_desc icl_power_wells[] = { |
2732 | { | 2802 | { |
2733 | .name = "always-on", | 2803 | .name = "always-on", |
2734 | .always_on = 1, | 2804 | .always_on = true, |
2735 | .domains = POWER_DOMAIN_MASK, | 2805 | .domains = POWER_DOMAIN_MASK, |
2736 | .ops = &i9xx_always_on_power_well_ops, | 2806 | .ops = &i9xx_always_on_power_well_ops, |
2737 | .id = DISP_PW_ID_NONE, | 2807 | .id = DISP_PW_ID_NONE, |
@@ -2739,6 +2809,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { | |||
2739 | { | 2809 | { |
2740 | .name = "power well 1", | 2810 | .name = "power well 1", |
2741 | /* Handled by the DMC firmware */ | 2811 | /* Handled by the DMC firmware */ |
2812 | .always_on = true, | ||
2742 | .domains = 0, | 2813 | .domains = 0, |
2743 | .ops = &hsw_power_well_ops, | 2814 | .ops = &hsw_power_well_ops, |
2744 | .id = SKL_DISP_PW_1, | 2815 | .id = SKL_DISP_PW_1, |
@@ -2749,6 +2820,12 @@ static const struct i915_power_well_desc icl_power_wells[] = { | |||
2749 | }, | 2820 | }, |
2750 | }, | 2821 | }, |
2751 | { | 2822 | { |
2823 | .name = "DC off", | ||
2824 | .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, | ||
2825 | .ops = &gen9_dc_off_power_well_ops, | ||
2826 | .id = DISP_PW_ID_NONE, | ||
2827 | }, | ||
2828 | { | ||
2752 | .name = "power well 2", | 2829 | .name = "power well 2", |
2753 | .domains = ICL_PW_2_POWER_DOMAINS, | 2830 | .domains = ICL_PW_2_POWER_DOMAINS, |
2754 | .ops = &hsw_power_well_ops, | 2831 | .ops = &hsw_power_well_ops, |
@@ -2760,12 +2837,6 @@ static const struct i915_power_well_desc icl_power_wells[] = { | |||
2760 | }, | 2837 | }, |
2761 | }, | 2838 | }, |
2762 | { | 2839 | { |
2763 | .name = "DC off", | ||
2764 | .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, | ||
2765 | .ops = &gen9_dc_off_power_well_ops, | ||
2766 | .id = DISP_PW_ID_NONE, | ||
2767 | }, | ||
2768 | { | ||
2769 | .name = "power well 3", | 2840 | .name = "power well 3", |
2770 | .domains = ICL_PW_3_POWER_DOMAINS, | 2841 | .domains = ICL_PW_3_POWER_DOMAINS, |
2771 | .ops = &hsw_power_well_ops, | 2842 | .ops = &hsw_power_well_ops, |
@@ -2861,81 +2932,89 @@ static const struct i915_power_well_desc icl_power_wells[] = { | |||
2861 | { | 2932 | { |
2862 | .name = "AUX C", | 2933 | .name = "AUX C", |
2863 | .domains = ICL_AUX_C_IO_POWER_DOMAINS, | 2934 | .domains = ICL_AUX_C_IO_POWER_DOMAINS, |
2864 | .ops = &hsw_power_well_ops, | 2935 | .ops = &icl_tc_phy_aux_power_well_ops, |
2865 | .id = DISP_PW_ID_NONE, | 2936 | .id = DISP_PW_ID_NONE, |
2866 | { | 2937 | { |
2867 | .hsw.regs = &icl_aux_power_well_regs, | 2938 | .hsw.regs = &icl_aux_power_well_regs, |
2868 | .hsw.idx = ICL_PW_CTL_IDX_AUX_C, | 2939 | .hsw.idx = ICL_PW_CTL_IDX_AUX_C, |
2940 | .hsw.is_tc_tbt = false, | ||
2869 | }, | 2941 | }, |
2870 | }, | 2942 | }, |
2871 | { | 2943 | { |
2872 | .name = "AUX D", | 2944 | .name = "AUX D", |
2873 | .domains = ICL_AUX_D_IO_POWER_DOMAINS, | 2945 | .domains = ICL_AUX_D_IO_POWER_DOMAINS, |
2874 | .ops = &hsw_power_well_ops, | 2946 | .ops = &icl_tc_phy_aux_power_well_ops, |
2875 | .id = DISP_PW_ID_NONE, | 2947 | .id = DISP_PW_ID_NONE, |
2876 | { | 2948 | { |
2877 | .hsw.regs = &icl_aux_power_well_regs, | 2949 | .hsw.regs = &icl_aux_power_well_regs, |
2878 | .hsw.idx = ICL_PW_CTL_IDX_AUX_D, | 2950 | .hsw.idx = ICL_PW_CTL_IDX_AUX_D, |
2951 | .hsw.is_tc_tbt = false, | ||
2879 | }, | 2952 | }, |
2880 | }, | 2953 | }, |
2881 | { | 2954 | { |
2882 | .name = "AUX E", | 2955 | .name = "AUX E", |
2883 | .domains = ICL_AUX_E_IO_POWER_DOMAINS, | 2956 | .domains = ICL_AUX_E_IO_POWER_DOMAINS, |
2884 | .ops = &hsw_power_well_ops, | 2957 | .ops = &icl_tc_phy_aux_power_well_ops, |
2885 | .id = DISP_PW_ID_NONE, | 2958 | .id = DISP_PW_ID_NONE, |
2886 | { | 2959 | { |
2887 | .hsw.regs = &icl_aux_power_well_regs, | 2960 | .hsw.regs = &icl_aux_power_well_regs, |
2888 | .hsw.idx = ICL_PW_CTL_IDX_AUX_E, | 2961 | .hsw.idx = ICL_PW_CTL_IDX_AUX_E, |
2962 | .hsw.is_tc_tbt = false, | ||
2889 | }, | 2963 | }, |
2890 | }, | 2964 | }, |
2891 | { | 2965 | { |
2892 | .name = "AUX F", | 2966 | .name = "AUX F", |
2893 | .domains = ICL_AUX_F_IO_POWER_DOMAINS, | 2967 | .domains = ICL_AUX_F_IO_POWER_DOMAINS, |
2894 | .ops = &hsw_power_well_ops, | 2968 | .ops = &icl_tc_phy_aux_power_well_ops, |
2895 | .id = DISP_PW_ID_NONE, | 2969 | .id = DISP_PW_ID_NONE, |
2896 | { | 2970 | { |
2897 | .hsw.regs = &icl_aux_power_well_regs, | 2971 | .hsw.regs = &icl_aux_power_well_regs, |
2898 | .hsw.idx = ICL_PW_CTL_IDX_AUX_F, | 2972 | .hsw.idx = ICL_PW_CTL_IDX_AUX_F, |
2973 | .hsw.is_tc_tbt = false, | ||
2899 | }, | 2974 | }, |
2900 | }, | 2975 | }, |
2901 | { | 2976 | { |
2902 | .name = "AUX TBT1", | 2977 | .name = "AUX TBT1", |
2903 | .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, | 2978 | .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, |
2904 | .ops = &hsw_power_well_ops, | 2979 | .ops = &icl_tc_phy_aux_power_well_ops, |
2905 | .id = DISP_PW_ID_NONE, | 2980 | .id = DISP_PW_ID_NONE, |
2906 | { | 2981 | { |
2907 | .hsw.regs = &icl_aux_power_well_regs, | 2982 | .hsw.regs = &icl_aux_power_well_regs, |
2908 | .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, | 2983 | .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, |
2984 | .hsw.is_tc_tbt = true, | ||
2909 | }, | 2985 | }, |
2910 | }, | 2986 | }, |
2911 | { | 2987 | { |
2912 | .name = "AUX TBT2", | 2988 | .name = "AUX TBT2", |
2913 | .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, | 2989 | .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, |
2914 | .ops = &hsw_power_well_ops, | 2990 | .ops = &icl_tc_phy_aux_power_well_ops, |
2915 | .id = DISP_PW_ID_NONE, | 2991 | .id = DISP_PW_ID_NONE, |
2916 | { | 2992 | { |
2917 | .hsw.regs = &icl_aux_power_well_regs, | 2993 | .hsw.regs = &icl_aux_power_well_regs, |
2918 | .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, | 2994 | .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, |
2995 | .hsw.is_tc_tbt = true, | ||
2919 | }, | 2996 | }, |
2920 | }, | 2997 | }, |
2921 | { | 2998 | { |
2922 | .name = "AUX TBT3", | 2999 | .name = "AUX TBT3", |
2923 | .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, | 3000 | .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, |
2924 | .ops = &hsw_power_well_ops, | 3001 | .ops = &icl_tc_phy_aux_power_well_ops, |
2925 | .id = DISP_PW_ID_NONE, | 3002 | .id = DISP_PW_ID_NONE, |
2926 | { | 3003 | { |
2927 | .hsw.regs = &icl_aux_power_well_regs, | 3004 | .hsw.regs = &icl_aux_power_well_regs, |
2928 | .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, | 3005 | .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, |
3006 | .hsw.is_tc_tbt = true, | ||
2929 | }, | 3007 | }, |
2930 | }, | 3008 | }, |
2931 | { | 3009 | { |
2932 | .name = "AUX TBT4", | 3010 | .name = "AUX TBT4", |
2933 | .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, | 3011 | .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, |
2934 | .ops = &hsw_power_well_ops, | 3012 | .ops = &icl_tc_phy_aux_power_well_ops, |
2935 | .id = DISP_PW_ID_NONE, | 3013 | .id = DISP_PW_ID_NONE, |
2936 | { | 3014 | { |
2937 | .hsw.regs = &icl_aux_power_well_regs, | 3015 | .hsw.regs = &icl_aux_power_well_regs, |
2938 | .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, | 3016 | .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, |
3017 | .hsw.is_tc_tbt = true, | ||
2939 | }, | 3018 | }, |
2940 | }, | 3019 | }, |
2941 | { | 3020 | { |
@@ -2969,17 +3048,20 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv, | |||
2969 | int requested_dc; | 3048 | int requested_dc; |
2970 | int max_dc; | 3049 | int max_dc; |
2971 | 3050 | ||
2972 | if (IS_GEN9_BC(dev_priv) || INTEL_INFO(dev_priv)->gen >= 10) { | 3051 | if (INTEL_GEN(dev_priv) >= 11) { |
2973 | max_dc = 2; | 3052 | max_dc = 2; |
2974 | mask = 0; | ||
2975 | } else if (IS_GEN9_LP(dev_priv)) { | ||
2976 | max_dc = 1; | ||
2977 | /* | 3053 | /* |
2978 | * DC9 has a separate HW flow from the rest of the DC states, | 3054 | * DC9 has a separate HW flow from the rest of the DC states, |
2979 | * not depending on the DMC firmware. It's needed by system | 3055 | * not depending on the DMC firmware. It's needed by system |
2980 | * suspend/resume, so allow it unconditionally. | 3056 | * suspend/resume, so allow it unconditionally. |
2981 | */ | 3057 | */ |
2982 | mask = DC_STATE_EN_DC9; | 3058 | mask = DC_STATE_EN_DC9; |
3059 | } else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) { | ||
3060 | max_dc = 2; | ||
3061 | mask = 0; | ||
3062 | } else if (IS_GEN9_LP(dev_priv)) { | ||
3063 | max_dc = 1; | ||
3064 | mask = DC_STATE_EN_DC9; | ||
2983 | } else { | 3065 | } else { |
2984 | max_dc = 0; | 3066 | max_dc = 0; |
2985 | mask = 0; | 3067 | mask = 0; |
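
After the reshuffle the platform tiers come out as: gen11+ allows up to DC6 plus unconditional DC9, gen10 and GEN9_BC allow up to DC6 without DC9, GEN9_LP allows DC5 plus DC9, and everything else allows nothing. A standalone sketch tabulating those (max_dc, mask) outcomes; the enum and the DC9 bit are stand-ins:

#include <stdio.h>

#define DC9 (1u << 0) /* stand-in for DC_STATE_EN_DC9 */

struct dc_caps { int max_dc; unsigned int mask; };

enum platform { GEN11, GEN10_OR_GEN9_BC, GEN9_LP, OTHER };

/* Mirrors the branch structure of get_allowed_dc_mask() above. */
static struct dc_caps allowed_dc(int p)
{
	switch (p) {
	case GEN11:            return (struct dc_caps){ 2, DC9 };
	case GEN10_OR_GEN9_BC: return (struct dc_caps){ 2, 0 };
	case GEN9_LP:          return (struct dc_caps){ 1, DC9 };
	default:               return (struct dc_caps){ 0, 0 };
	}
}

int main(void)
{
	const char *names[] = { "gen11+", "gen10/gen9-bc", "gen9-lp", "other" };

	for (int p = GEN11; p <= OTHER; p++) {
		struct dc_caps c = allowed_dc(p);
		printf("%-14s max_dc=%d dc9=%s\n", names[p], c.max_dc,
		       (c.mask & DC9) ? "yes" : "no");
	}
	return 0;
}
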
@@ -3075,12 +3157,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) | |||
3075 | */ | 3157 | */ |
3076 | if (IS_ICELAKE(dev_priv)) { | 3158 | if (IS_ICELAKE(dev_priv)) { |
3077 | err = set_power_wells(power_domains, icl_power_wells); | 3159 | err = set_power_wells(power_domains, icl_power_wells); |
3078 | } else if (IS_HASWELL(dev_priv)) { | ||
3079 | err = set_power_wells(power_domains, hsw_power_wells); | ||
3080 | } else if (IS_BROADWELL(dev_priv)) { | ||
3081 | err = set_power_wells(power_domains, bdw_power_wells); | ||
3082 | } else if (IS_GEN9_BC(dev_priv)) { | ||
3083 | err = set_power_wells(power_domains, skl_power_wells); | ||
3084 | } else if (IS_CANNONLAKE(dev_priv)) { | 3160 | } else if (IS_CANNONLAKE(dev_priv)) { |
3085 | err = set_power_wells(power_domains, cnl_power_wells); | 3161 | err = set_power_wells(power_domains, cnl_power_wells); |
3086 | 3162 | ||
@@ -3092,13 +3168,18 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) | |||
3092 | */ | 3168 | */ |
3093 | if (!IS_CNL_WITH_PORT_F(dev_priv)) | 3169 | if (!IS_CNL_WITH_PORT_F(dev_priv)) |
3094 | power_domains->power_well_count -= 2; | 3170 | power_domains->power_well_count -= 2; |
3095 | |||
3096 | } else if (IS_BROXTON(dev_priv)) { | ||
3097 | err = set_power_wells(power_domains, bxt_power_wells); | ||
3098 | } else if (IS_GEMINILAKE(dev_priv)) { | 3171 | } else if (IS_GEMINILAKE(dev_priv)) { |
3099 | err = set_power_wells(power_domains, glk_power_wells); | 3172 | err = set_power_wells(power_domains, glk_power_wells); |
3173 | } else if (IS_BROXTON(dev_priv)) { | ||
3174 | err = set_power_wells(power_domains, bxt_power_wells); | ||
3175 | } else if (IS_GEN9_BC(dev_priv)) { | ||
3176 | err = set_power_wells(power_domains, skl_power_wells); | ||
3100 | } else if (IS_CHERRYVIEW(dev_priv)) { | 3177 | } else if (IS_CHERRYVIEW(dev_priv)) { |
3101 | err = set_power_wells(power_domains, chv_power_wells); | 3178 | err = set_power_wells(power_domains, chv_power_wells); |
3179 | } else if (IS_BROADWELL(dev_priv)) { | ||
3180 | err = set_power_wells(power_domains, bdw_power_wells); | ||
3181 | } else if (IS_HASWELL(dev_priv)) { | ||
3182 | err = set_power_wells(power_domains, hsw_power_wells); | ||
3102 | } else if (IS_VALLEYVIEW(dev_priv)) { | 3183 | } else if (IS_VALLEYVIEW(dev_priv)) { |
3103 | err = set_power_wells(power_domains, vlv_power_wells); | 3184 | err = set_power_wells(power_domains, vlv_power_wells); |
3104 | } else if (IS_I830(dev_priv)) { | 3185 | } else if (IS_I830(dev_priv)) { |
@@ -3176,8 +3257,7 @@ static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv) | |||
3176 | void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, | 3257 | void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, |
3177 | u8 req_slices) | 3258 | u8 req_slices) |
3178 | { | 3259 | { |
3179 | u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; | 3260 | const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; |
3180 | u32 val; | ||
3181 | bool ret; | 3261 | bool ret; |
3182 | 3262 | ||
3183 | if (req_slices > intel_dbuf_max_slices(dev_priv)) { | 3263 | if (req_slices > intel_dbuf_max_slices(dev_priv)) { |
@@ -3188,7 +3268,6 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, | |||
3188 | if (req_slices == hw_enabled_slices || req_slices == 0) | 3268 | if (req_slices == hw_enabled_slices || req_slices == 0) |
3189 | return; | 3269 | return; |
3190 | 3270 | ||
3191 | val = I915_READ(DBUF_CTL_S2); | ||
3192 | if (req_slices > hw_enabled_slices) | 3271 | if (req_slices > hw_enabled_slices) |
3193 | ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true); | 3272 | ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true); |
3194 | else | 3273 | else |
@@ -3240,18 +3319,40 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv) | |||
3240 | I915_WRITE(MBUS_ABOX_CTL, val); | 3319 | I915_WRITE(MBUS_ABOX_CTL, val); |
3241 | } | 3320 | } |
3242 | 3321 | ||
3322 | static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, | ||
3323 | bool enable) | ||
3324 | { | ||
3325 | i915_reg_t reg; | ||
3326 | u32 reset_bits, val; | ||
3327 | |||
3328 | if (IS_IVYBRIDGE(dev_priv)) { | ||
3329 | reg = GEN7_MSG_CTL; | ||
3330 | reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK; | ||
3331 | } else { | ||
3332 | reg = HSW_NDE_RSTWRN_OPT; | ||
3333 | reset_bits = RESET_PCH_HANDSHAKE_ENABLE; | ||
3334 | } | ||
3335 | |||
3336 | val = I915_READ(reg); | ||
3337 | |||
3338 | if (enable) | ||
3339 | val |= reset_bits; | ||
3340 | else | ||
3341 | val &= ~reset_bits; | ||
3342 | |||
3343 | I915_WRITE(reg, val); | ||
3344 | } | ||
3345 | |||
3243 | static void skl_display_core_init(struct drm_i915_private *dev_priv, | 3346 | static void skl_display_core_init(struct drm_i915_private *dev_priv, |
3244 | bool resume) | 3347 | bool resume) |
3245 | { | 3348 | { |
3246 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | 3349 | struct i915_power_domains *power_domains = &dev_priv->power_domains; |
3247 | struct i915_power_well *well; | 3350 | struct i915_power_well *well; |
3248 | uint32_t val; | ||
3249 | 3351 | ||
3250 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); | 3352 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); |
3251 | 3353 | ||
3252 | /* enable PCH reset handshake */ | 3354 | /* enable PCH reset handshake */ |
3253 | val = I915_READ(HSW_NDE_RSTWRN_OPT); | 3355 | intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); |
3254 | I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE); | ||
3255 | 3356 | ||
3256 | /* enable PG1 and Misc I/O */ | 3357 | /* enable PG1 and Misc I/O */ |
3257 | mutex_lock(&power_domains->lock); | 3358 | mutex_lock(&power_domains->lock); |
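
intel_pch_reset_handshake() collapses three open-coded HSW_NDE_RSTWRN_OPT sequences (and the Ivybridge GEN7_MSG_CTL variant) into one read-modify-write. A standalone sketch of the same shape against a simulated register; the bit position is hypothetical:

#include <stdbool.h>
#include <stdio.h>

/* Simulated register standing in for HSW_NDE_RSTWRN_OPT / GEN7_MSG_CTL. */
static unsigned int fake_reg = 0x1;

#define RESET_HANDSHAKE_BITS (1u << 4) /* hypothetical bit position */

/* Same shape as intel_pch_reset_handshake(): set or clear the
 * platform's handshake bits in a single read-modify-write. */
static void pch_reset_handshake(bool enable)
{
	unsigned int val = fake_reg;

	if (enable)
		val |= RESET_HANDSHAKE_BITS;
	else
		val &= ~RESET_HANDSHAKE_BITS;

	fake_reg = val;
}

int main(void)
{
	pch_reset_handshake(true);
	printf("enabled:  %#x\n", fake_reg);
	pch_reset_handshake(false);
	printf("disabled: %#x\n", fake_reg);
	return 0;
}
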
@@ -3307,7 +3408,6 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv, | |||
3307 | { | 3408 | { |
3308 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | 3409 | struct i915_power_domains *power_domains = &dev_priv->power_domains; |
3309 | struct i915_power_well *well; | 3410 | struct i915_power_well *well; |
3310 | uint32_t val; | ||
3311 | 3411 | ||
3312 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); | 3412 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); |
3313 | 3413 | ||
@@ -3317,9 +3417,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv, | |||
3317 | * Move the handshake programming to initialization sequence. | 3417 | * Move the handshake programming to initialization sequence. |
3318 | * Previously was left up to BIOS. | 3418 | * Previously was left up to BIOS. |
3319 | */ | 3419 | */ |
3320 | val = I915_READ(HSW_NDE_RSTWRN_OPT); | 3420 | intel_pch_reset_handshake(dev_priv, false); |
3321 | val &= ~RESET_PCH_HANDSHAKE_ENABLE; | ||
3322 | I915_WRITE(HSW_NDE_RSTWRN_OPT, val); | ||
3323 | 3421 | ||
3324 | /* Enable PG1 */ | 3422 | /* Enable PG1 */ |
3325 | mutex_lock(&power_domains->lock); | 3423 | mutex_lock(&power_domains->lock); |
@@ -3365,101 +3463,18 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv) | |||
3365 | usleep_range(10, 30); /* 10 us delay per Bspec */ | 3463 | usleep_range(10, 30); /* 10 us delay per Bspec */ |
3366 | } | 3464 | } |
3367 | 3465 | ||
3368 | enum { | ||
3369 | PROCMON_0_85V_DOT_0, | ||
3370 | PROCMON_0_95V_DOT_0, | ||
3371 | PROCMON_0_95V_DOT_1, | ||
3372 | PROCMON_1_05V_DOT_0, | ||
3373 | PROCMON_1_05V_DOT_1, | ||
3374 | }; | ||
3375 | |||
3376 | static const struct cnl_procmon { | ||
3377 | u32 dw1, dw9, dw10; | ||
3378 | } cnl_procmon_values[] = { | ||
3379 | [PROCMON_0_85V_DOT_0] = | ||
3380 | { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, }, | ||
3381 | [PROCMON_0_95V_DOT_0] = | ||
3382 | { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, }, | ||
3383 | [PROCMON_0_95V_DOT_1] = | ||
3384 | { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, }, | ||
3385 | [PROCMON_1_05V_DOT_0] = | ||
3386 | { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, }, | ||
3387 | [PROCMON_1_05V_DOT_1] = | ||
3388 | { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, }, | ||
3389 | }; | ||
3390 | |||
3391 | /* | ||
3392 | * CNL has just one set of registers, while ICL has two sets: one for port A and | ||
3393 | * the other for port B. The CNL registers are equivalent to the ICL port A | ||
3394 | * registers, that's why we call the ICL macros even though the function has CNL | ||
3395 | * on its name. | ||
3396 | */ | ||
3397 | static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv, | ||
3398 | enum port port) | ||
3399 | { | ||
3400 | const struct cnl_procmon *procmon; | ||
3401 | u32 val; | ||
3402 | |||
3403 | val = I915_READ(ICL_PORT_COMP_DW3(port)); | ||
3404 | switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) { | ||
3405 | default: | ||
3406 | MISSING_CASE(val); | ||
3407 | /* fall through */ | ||
3408 | case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0: | ||
3409 | procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0]; | ||
3410 | break; | ||
3411 | case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0: | ||
3412 | procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0]; | ||
3413 | break; | ||
3414 | case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1: | ||
3415 | procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1]; | ||
3416 | break; | ||
3417 | case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0: | ||
3418 | procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0]; | ||
3419 | break; | ||
3420 | case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1: | ||
3421 | procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1]; | ||
3422 | break; | ||
3423 | } | ||
3424 | |||
3425 | val = I915_READ(ICL_PORT_COMP_DW1(port)); | ||
3426 | val &= ~((0xff << 16) | 0xff); | ||
3427 | val |= procmon->dw1; | ||
3428 | I915_WRITE(ICL_PORT_COMP_DW1(port), val); | ||
3429 | |||
3430 | I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9); | ||
3431 | I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10); | ||
3432 | } | ||
3433 | |||
3434 | static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume) | 3466 | static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume) |
3435 | { | 3467 | { |
3436 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | 3468 | struct i915_power_domains *power_domains = &dev_priv->power_domains; |
3437 | struct i915_power_well *well; | 3469 | struct i915_power_well *well; |
3438 | u32 val; | ||
3439 | 3470 | ||
3440 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); | 3471 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); |
3441 | 3472 | ||
3442 | /* 1. Enable PCH Reset Handshake */ | 3473 | /* 1. Enable PCH Reset Handshake */ |
3443 | val = I915_READ(HSW_NDE_RSTWRN_OPT); | 3474 | intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); |
3444 | val |= RESET_PCH_HANDSHAKE_ENABLE; | ||
3445 | I915_WRITE(HSW_NDE_RSTWRN_OPT, val); | ||
3446 | 3475 | ||
3447 | /* 2. Enable Comp */ | 3476 | /* 2-3. */ |
3448 | val = I915_READ(CHICKEN_MISC_2); | 3477 | cnl_combo_phys_init(dev_priv); |
3449 | val &= ~CNL_COMP_PWR_DOWN; | ||
3450 | I915_WRITE(CHICKEN_MISC_2, val); | ||
3451 | |||
3452 | /* Dummy PORT_A to get the correct CNL register from the ICL macro */ | ||
3453 | cnl_set_procmon_ref_values(dev_priv, PORT_A); | ||
3454 | |||
3455 | val = I915_READ(CNL_PORT_COMP_DW0); | ||
3456 | val |= COMP_INIT; | ||
3457 | I915_WRITE(CNL_PORT_COMP_DW0, val); | ||
3458 | |||
3459 | /* 3. */ | ||
3460 | val = I915_READ(CNL_PORT_CL1CM_DW5); | ||
3461 | val |= CL_POWER_DOWN_ENABLE; | ||
3462 | I915_WRITE(CNL_PORT_CL1CM_DW5, val); | ||
3463 | 3478 | ||
3464 | /* | 3479 | /* |
3465 | * 4. Enable Power Well 1 (PG1). | 3480 | * 4. Enable Power Well 1 (PG1). |
@@ -3484,7 +3499,6 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv) | |||
3484 | { | 3499 | { |
3485 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | 3500 | struct i915_power_domains *power_domains = &dev_priv->power_domains; |
3486 | struct i915_power_well *well; | 3501 | struct i915_power_well *well; |
3487 | u32 val; | ||
3488 | 3502 | ||
3489 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); | 3503 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); |
3490 | 3504 | ||
@@ -3508,44 +3522,23 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv) | |||
3508 | 3522 | ||
3509 | usleep_range(10, 30); /* 10 us delay per Bspec */ | 3523 | usleep_range(10, 30); /* 10 us delay per Bspec */ |
3510 | 3524 | ||
3511 | /* 5. Disable Comp */ | 3525 | /* 5. */ |
3512 | val = I915_READ(CHICKEN_MISC_2); | 3526 | cnl_combo_phys_uninit(dev_priv); |
3513 | val |= CNL_COMP_PWR_DOWN; | ||
3514 | I915_WRITE(CHICKEN_MISC_2, val); | ||
3515 | } | 3527 | } |
3516 | 3528 | ||
3517 | static void icl_display_core_init(struct drm_i915_private *dev_priv, | 3529 | void icl_display_core_init(struct drm_i915_private *dev_priv, |
3518 | bool resume) | 3530 | bool resume) |
3519 | { | 3531 | { |
3520 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | 3532 | struct i915_power_domains *power_domains = &dev_priv->power_domains; |
3521 | struct i915_power_well *well; | 3533 | struct i915_power_well *well; |
3522 | enum port port; | ||
3523 | u32 val; | ||
3524 | 3534 | ||
3525 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); | 3535 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); |
3526 | 3536 | ||
3527 | /* 1. Enable PCH reset handshake. */ | 3537 | /* 1. Enable PCH reset handshake. */ |
3528 | val = I915_READ(HSW_NDE_RSTWRN_OPT); | 3538 | intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); |
3529 | val |= RESET_PCH_HANDSHAKE_ENABLE; | 3539 | |
3530 | I915_WRITE(HSW_NDE_RSTWRN_OPT, val); | 3540 | /* 2-3. */ |
3531 | 3541 | icl_combo_phys_init(dev_priv); | |
3532 | for (port = PORT_A; port <= PORT_B; port++) { | ||
3533 | /* 2. Enable DDI combo PHY comp. */ | ||
3534 | val = I915_READ(ICL_PHY_MISC(port)); | ||
3535 | val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; | ||
3536 | I915_WRITE(ICL_PHY_MISC(port), val); | ||
3537 | |||
3538 | cnl_set_procmon_ref_values(dev_priv, port); | ||
3539 | |||
3540 | val = I915_READ(ICL_PORT_COMP_DW0(port)); | ||
3541 | val |= COMP_INIT; | ||
3542 | I915_WRITE(ICL_PORT_COMP_DW0(port), val); | ||
3543 | |||
3544 | /* 3. Set power down enable. */ | ||
3545 | val = I915_READ(ICL_PORT_CL_DW5(port)); | ||
3546 | val |= CL_POWER_DOWN_ENABLE; | ||
3547 | I915_WRITE(ICL_PORT_CL_DW5(port), val); | ||
3548 | } | ||
3549 | 3542 | ||
3550 | /* | 3543 | /* |
3551 | * 4. Enable Power Well 1 (PG1). | 3544 | * 4. Enable Power Well 1 (PG1). |
@@ -3569,12 +3562,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, | |||
3569 | intel_csr_load_program(dev_priv); | 3562 | intel_csr_load_program(dev_priv); |
3570 | } | 3563 | } |
3571 | 3564 | ||
3572 | static void icl_display_core_uninit(struct drm_i915_private *dev_priv) | 3565 | void icl_display_core_uninit(struct drm_i915_private *dev_priv) |
3573 | { | 3566 | { |
3574 | struct i915_power_domains *power_domains = &dev_priv->power_domains; | 3567 | struct i915_power_domains *power_domains = &dev_priv->power_domains; |
3575 | struct i915_power_well *well; | 3568 | struct i915_power_well *well; |
3576 | enum port port; | ||
3577 | u32 val; | ||
3578 | 3569 | ||
3579 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); | 3570 | gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); |
3580 | 3571 | ||
@@ -3596,12 +3587,8 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv) | |||
3596 | intel_power_well_disable(dev_priv, well); | 3587 | intel_power_well_disable(dev_priv, well); |
3597 | mutex_unlock(&power_domains->lock); | 3588 | mutex_unlock(&power_domains->lock); |
3598 | 3589 | ||
3599 | /* 5. Disable Comp */ | 3590 | /* 5. */ |
3600 | for (port = PORT_A; port <= PORT_B; port++) { | 3591 | icl_combo_phys_uninit(dev_priv); |
3601 | val = I915_READ(ICL_PHY_MISC(port)); | ||
3602 | val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; | ||
3603 | I915_WRITE(ICL_PHY_MISC(port), val); | ||
3604 | } | ||
3605 | } | 3592 | } |
3606 | 3593 | ||
3607 | static void chv_phy_control_init(struct drm_i915_private *dev_priv) | 3594 | static void chv_phy_control_init(struct drm_i915_private *dev_priv) |
@@ -3759,7 +3746,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) | |||
3759 | mutex_lock(&power_domains->lock); | 3746 | mutex_lock(&power_domains->lock); |
3760 | vlv_cmnlane_wa(dev_priv); | 3747 | vlv_cmnlane_wa(dev_priv); |
3761 | mutex_unlock(&power_domains->lock); | 3748 | mutex_unlock(&power_domains->lock); |
3762 | } | 3749 | } else if (IS_IVYBRIDGE(dev_priv) || INTEL_GEN(dev_priv) >= 7) |
3750 | intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); | ||
3763 | 3751 | ||
3764 | /* | 3752 | /* |
3765 | * Keep all power wells enabled for any dependent HW access during | 3753 | * Keep all power wells enabled for any dependent HW access during |
@@ -3953,14 +3941,6 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv) | |||
3953 | int domains_count; | 3941 | int domains_count; |
3954 | bool enabled; | 3942 | bool enabled; |
3955 | 3943 | ||
3956 | /* | ||
3957 | * Power wells not belonging to any domain (like the MISC_IO | ||
3958 | * and PW1 power wells) are under FW control, so ignore them, | ||
3959 | * since their state can change asynchronously. | ||
3960 | */ | ||
3961 | if (!power_well->desc->domains) | ||
3962 | continue; | ||
3963 | |||
3964 | enabled = power_well->desc->ops->is_enabled(dev_priv, | 3944 | enabled = power_well->desc->ops->is_enabled(dev_priv, |
3965 | power_well); | 3945 | power_well); |
3966 | if ((power_well->count || power_well->desc->always_on) != | 3946 | if ((power_well->count || power_well->desc->always_on) != |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 701372e512a8..5805ec1aba12 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -105,11 +105,6 @@ struct intel_sdvo { | |||
105 | bool has_hdmi_audio; | 105 | bool has_hdmi_audio; |
106 | bool rgb_quant_range_selectable; | 106 | bool rgb_quant_range_selectable; |
107 | 107 | ||
108 | /** | ||
109 | * This is sdvo fixed pannel mode pointer | ||
110 | */ | ||
111 | struct drm_display_mode *sdvo_lvds_fixed_mode; | ||
112 | |||
113 | /* DDC bus used by this SDVO encoder */ | 108 | /* DDC bus used by this SDVO encoder */ |
114 | uint8_t ddc_bus; | 109 | uint8_t ddc_bus; |
115 | 110 | ||
@@ -765,10 +760,14 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo, | |||
765 | args.height = height; | 760 | args.height = height; |
766 | args.interlace = 0; | 761 | args.interlace = 0; |
767 | 762 | ||
768 | if (IS_LVDS(intel_sdvo_connector) && | 763 | if (IS_LVDS(intel_sdvo_connector)) { |
769 | (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width || | 764 | const struct drm_display_mode *fixed_mode = |
770 | intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height)) | 765 | intel_sdvo_connector->base.panel.fixed_mode; |
771 | args.scaled = 1; | 766 | |
767 | if (fixed_mode->hdisplay != width || | ||
768 | fixed_mode->vdisplay != height) | ||
769 | args.scaled = 1; | ||
770 | } | ||
772 | 771 | ||
773 | return intel_sdvo_set_value(intel_sdvo, | 772 | return intel_sdvo_set_value(intel_sdvo, |
774 | SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, | 773 | SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING, |
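
With the fixed mode now living in the connector's panel state, the preferred-input-timing request asks the encoder to scale whenever the requested size differs from the panel's native mode. A standalone sketch of that decision, with the mode struct trimmed to the two fields used:

#include <stdbool.h>
#include <stdio.h>

struct display_mode { int hdisplay, vdisplay; };

/* Mirrors the IS_LVDS branch above: scale whenever the requested size
 * differs from the panel's fixed (native) mode. */
static bool needs_scaling(const struct display_mode *fixed, int w, int h)
{
	return fixed->hdisplay != w || fixed->vdisplay != h;
}

int main(void)
{
	struct display_mode fixed = { 1366, 768 };

	printf("1366x768 -> scaled=%d\n", needs_scaling(&fixed, 1366, 768)); /* 0 */
	printf("1024x768 -> scaled=%d\n", needs_scaling(&fixed, 1024, 768)); /* 1 */
	return 0;
}
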
@@ -1123,6 +1122,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder, | |||
1123 | 1122 | ||
1124 | DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n"); | 1123 | DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n"); |
1125 | pipe_config->pipe_bpp = 8*3; | 1124 | pipe_config->pipe_bpp = 8*3; |
1125 | pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; | ||
1126 | 1126 | ||
1127 | if (HAS_PCH_SPLIT(to_i915(encoder->base.dev))) | 1127 | if (HAS_PCH_SPLIT(to_i915(encoder->base.dev))) |
1128 | pipe_config->has_pch_encoder = true; | 1128 | pipe_config->has_pch_encoder = true; |
@@ -1144,7 +1144,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder, | |||
1144 | pipe_config->sdvo_tv_clock = true; | 1144 | pipe_config->sdvo_tv_clock = true; |
1145 | } else if (IS_LVDS(intel_sdvo_connector)) { | 1145 | } else if (IS_LVDS(intel_sdvo_connector)) { |
1146 | if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, | 1146 | if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, |
1147 | intel_sdvo->sdvo_lvds_fixed_mode)) | 1147 | intel_sdvo_connector->base.panel.fixed_mode)) |
1148 | return false; | 1148 | return false; |
1149 | 1149 | ||
1150 | (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, | 1150 | (void) intel_sdvo_get_preferred_input_mode(intel_sdvo, |
@@ -1301,7 +1301,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, | |||
1301 | /* lvds has a special fixed output timing. */ | 1301 | /* lvds has a special fixed output timing. */ |
1302 | if (IS_LVDS(intel_sdvo_connector)) | 1302 | if (IS_LVDS(intel_sdvo_connector)) |
1303 | intel_sdvo_get_dtd_from_mode(&output_dtd, | 1303 | intel_sdvo_get_dtd_from_mode(&output_dtd, |
1304 | intel_sdvo->sdvo_lvds_fixed_mode); | 1304 | intel_sdvo_connector->base.panel.fixed_mode); |
1305 | else | 1305 | else |
1306 | intel_sdvo_get_dtd_from_mode(&output_dtd, mode); | 1306 | intel_sdvo_get_dtd_from_mode(&output_dtd, mode); |
1307 | if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd)) | 1307 | if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd)) |
@@ -1642,10 +1642,13 @@ intel_sdvo_mode_valid(struct drm_connector *connector, | |||
1642 | return MODE_CLOCK_HIGH; | 1642 | return MODE_CLOCK_HIGH; |
1643 | 1643 | ||
1644 | if (IS_LVDS(intel_sdvo_connector)) { | 1644 | if (IS_LVDS(intel_sdvo_connector)) { |
1645 | if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay) | 1645 | const struct drm_display_mode *fixed_mode = |
1646 | intel_sdvo_connector->base.panel.fixed_mode; | ||
1647 | |||
1648 | if (mode->hdisplay > fixed_mode->hdisplay) | ||
1646 | return MODE_PANEL; | 1649 | return MODE_PANEL; |
1647 | 1650 | ||
1648 | if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay) | 1651 | if (mode->vdisplay > fixed_mode->vdisplay) |
1649 | return MODE_PANEL; | 1652 | return MODE_PANEL; |
1650 | } | 1653 | } |
1651 | 1654 | ||
@@ -2058,14 +2061,6 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) | |||
2058 | return !list_empty(&connector->probed_modes); | 2061 | return !list_empty(&connector->probed_modes); |
2059 | } | 2062 | } |
2060 | 2063 | ||
2061 | static void intel_sdvo_destroy(struct drm_connector *connector) | ||
2062 | { | ||
2063 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); | ||
2064 | |||
2065 | drm_connector_cleanup(connector); | ||
2066 | kfree(intel_sdvo_connector); | ||
2067 | } | ||
2068 | |||
2069 | static int | 2064 | static int |
2070 | intel_sdvo_connector_atomic_get_property(struct drm_connector *connector, | 2065 | intel_sdvo_connector_atomic_get_property(struct drm_connector *connector, |
2071 | const struct drm_connector_state *state, | 2066 | const struct drm_connector_state *state, |
@@ -2228,7 +2223,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = { | |||
2228 | .atomic_set_property = intel_sdvo_connector_atomic_set_property, | 2223 | .atomic_set_property = intel_sdvo_connector_atomic_set_property, |
2229 | .late_register = intel_sdvo_connector_register, | 2224 | .late_register = intel_sdvo_connector_register, |
2230 | .early_unregister = intel_sdvo_connector_unregister, | 2225 | .early_unregister = intel_sdvo_connector_unregister, |
2231 | .destroy = intel_sdvo_destroy, | 2226 | .destroy = intel_connector_destroy, |
2232 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | 2227 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, |
2233 | .atomic_duplicate_state = intel_sdvo_connector_duplicate_state, | 2228 | .atomic_duplicate_state = intel_sdvo_connector_duplicate_state, |
2234 | }; | 2229 | }; |
@@ -2267,10 +2262,6 @@ static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) | |||
2267 | { | 2262 | { |
2268 | struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder)); | 2263 | struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder)); |
2269 | 2264 | ||
2270 | if (intel_sdvo->sdvo_lvds_fixed_mode != NULL) | ||
2271 | drm_mode_destroy(encoder->dev, | ||
2272 | intel_sdvo->sdvo_lvds_fixed_mode); | ||
2273 | |||
2274 | i2c_del_adapter(&intel_sdvo->ddc); | 2265 | i2c_del_adapter(&intel_sdvo->ddc); |
2275 | intel_encoder_destroy(encoder); | 2266 | intel_encoder_destroy(encoder); |
2276 | } | 2267 | } |
@@ -2583,7 +2574,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) | |||
2583 | return true; | 2574 | return true; |
2584 | 2575 | ||
2585 | err: | 2576 | err: |
2586 | intel_sdvo_destroy(connector); | 2577 | intel_connector_destroy(connector); |
2587 | return false; | 2578 | return false; |
2588 | } | 2579 | } |
2589 | 2580 | ||
@@ -2663,19 +2654,22 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) | |||
2663 | 2654 | ||
2664 | list_for_each_entry(mode, &connector->probed_modes, head) { | 2655 | list_for_each_entry(mode, &connector->probed_modes, head) { |
2665 | if (mode->type & DRM_MODE_TYPE_PREFERRED) { | 2656 | if (mode->type & DRM_MODE_TYPE_PREFERRED) { |
2666 | intel_sdvo->sdvo_lvds_fixed_mode = | 2657 | struct drm_display_mode *fixed_mode = |
2667 | drm_mode_duplicate(connector->dev, mode); | 2658 | drm_mode_duplicate(connector->dev, mode); |
2659 | |||
2660 | intel_panel_init(&intel_connector->panel, | ||
2661 | fixed_mode, NULL); | ||
2668 | break; | 2662 | break; |
2669 | } | 2663 | } |
2670 | } | 2664 | } |
2671 | 2665 | ||
2672 | if (!intel_sdvo->sdvo_lvds_fixed_mode) | 2666 | if (!intel_connector->panel.fixed_mode) |
2673 | goto err; | 2667 | goto err; |
2674 | 2668 | ||
2675 | return true; | 2669 | return true; |
2676 | 2670 | ||
2677 | err: | 2671 | err: |
2678 | intel_sdvo_destroy(connector); | 2672 | intel_connector_destroy(connector); |
2679 | return false; | 2673 | return false; |
2680 | } | 2674 | } |
2681 | 2675 | ||
@@ -2745,7 +2739,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo) | |||
2745 | &dev->mode_config.connector_list, head) { | 2739 | &dev->mode_config.connector_list, head) { |
2746 | if (intel_attached_encoder(connector) == &intel_sdvo->base) { | 2740 | if (intel_attached_encoder(connector) == &intel_sdvo->base) { |
2747 | drm_connector_unregister(connector); | 2741 | drm_connector_unregister(connector); |
2748 | intel_sdvo_destroy(connector); | 2742 | intel_connector_destroy(connector); |
2749 | } | 2743 | } |
2750 | } | 2744 | } |
2751 | } | 2745 | } |
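The hunks above drop intel_sdvo's private destroy hook in favour of a shared intel_connector_destroy(), and hand the SDVO LVDS fixed mode to intel_panel so it is freed with the connector instead of in intel_sdvo_enc_destroy(). The shared helper itself is outside this diff; a plausible sketch, assuming it simply merges the duties of the per-connector versions it replaces (the detect_edid and panel teardown are inferred, not shown in these hunks):

void intel_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* free any EDID cached during detection */
	kfree(intel_connector->detect_edid);

	/* releases the fixed mode handed over by intel_panel_init() */
	intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}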
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 5fd2f7bf3927..abe193815ccc 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include "intel_frontbuffer.h" | 40 | #include "intel_frontbuffer.h" |
41 | #include <drm/i915_drm.h> | 41 | #include <drm/i915_drm.h> |
42 | #include "i915_drv.h" | 42 | #include "i915_drv.h" |
43 | #include <drm/drm_color_mgmt.h> | ||
43 | 44 | ||
44 | int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, | 45 | int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, |
45 | int usecs) | 46 | int usecs) |
@@ -275,17 +276,24 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) | |||
275 | src->y2 = (src_y + src_h) << 16; | 276 | src->y2 = (src_y + src_h) << 16; |
276 | 277 | ||
277 | if (fb->format->is_yuv && | 278 | if (fb->format->is_yuv && |
278 | fb->format->format != DRM_FORMAT_NV12 && | ||
279 | (src_x & 1 || src_w & 1)) { | 279 | (src_x & 1 || src_w & 1)) { |
280 | DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n", | 280 | DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n", |
281 | src_x, src_w); | 281 | src_x, src_w); |
282 | return -EINVAL; | 282 | return -EINVAL; |
283 | } | 283 | } |
284 | 284 | ||
285 | if (fb->format->is_yuv && | ||
286 | fb->format->num_planes > 1 && | ||
287 | (src_y & 1 || src_h & 1)) { | ||
288 | DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of 2 for planar YUV planes\n", | ||
289 | src_y, src_h); | ||
290 | return -EINVAL; | ||
291 | } | ||
292 | |||
285 | return 0; | 293 | return 0; |
286 | } | 294 | } |
287 | 295 | ||
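The second parity check added above completes the chroma subsampling rule: packed YUV (4:2:2) only subsamples horizontally, so x/w must be even, while planar formats such as NV12 (4:2:0) also subsample vertically, so y/h must be even too. The same rule expressed through the generic format metadata, as a minimal illustrative helper (not part of the patch):

static bool yuv_src_coords_aligned(const struct drm_framebuffer *fb,
				   int src_x, int src_y, int src_w, int src_h)
{
	/* RGB planes carry no chroma, so no alignment constraint */
	if (!fb->format->is_yuv)
		return true;

	/* horizontal subsampling: x/w must be multiples of hsub */
	if ((src_x | src_w) & (fb->format->hsub - 1))
		return false;

	/* planar 4:2:0 also subsamples vertically: y/h multiples of vsub */
	if (fb->format->num_planes > 1 &&
	    ((src_y | src_h) & (fb->format->vsub - 1)))
		return false;

	return true;
}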
288 | unsigned int | 296 | static unsigned int |
289 | skl_plane_max_stride(struct intel_plane *plane, | 297 | skl_plane_max_stride(struct intel_plane *plane, |
290 | u32 pixel_format, u64 modifier, | 298 | u32 pixel_format, u64 modifier, |
291 | unsigned int rotation) | 299 | unsigned int rotation) |
@@ -302,35 +310,201 @@ skl_plane_max_stride(struct intel_plane *plane, | |||
302 | return min(8192 * cpp, 32768); | 310 | return min(8192 * cpp, 32768); |
303 | } | 311 | } |
304 | 312 | ||
305 | void | 313 | static void |
306 | skl_update_plane(struct intel_plane *plane, | 314 | skl_program_scaler(struct intel_plane *plane, |
307 | const struct intel_crtc_state *crtc_state, | 315 | const struct intel_crtc_state *crtc_state, |
308 | const struct intel_plane_state *plane_state) | 316 | const struct intel_plane_state *plane_state) |
317 | { | ||
318 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | ||
319 | enum pipe pipe = plane->pipe; | ||
320 | int scaler_id = plane_state->scaler_id; | ||
321 | const struct intel_scaler *scaler = | ||
322 | &crtc_state->scaler_state.scalers[scaler_id]; | ||
323 | int crtc_x = plane_state->base.dst.x1; | ||
324 | int crtc_y = plane_state->base.dst.y1; | ||
325 | uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); | ||
326 | uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); | ||
327 | u16 y_hphase, uv_rgb_hphase; | ||
328 | u16 y_vphase, uv_rgb_vphase; | ||
329 | int hscale, vscale; | ||
330 | |||
331 | hscale = drm_rect_calc_hscale(&plane_state->base.src, | ||
332 | &plane_state->base.dst, | ||
333 | 0, INT_MAX); | ||
334 | vscale = drm_rect_calc_vscale(&plane_state->base.src, | ||
335 | &plane_state->base.dst, | ||
336 | 0, INT_MAX); | ||
337 | |||
338 | /* TODO: handle sub-pixel coordinates */ | ||
339 | if (plane_state->base.fb->format->format == DRM_FORMAT_NV12 && | ||
340 | !icl_is_hdr_plane(plane)) { | ||
341 | y_hphase = skl_scaler_calc_phase(1, hscale, false); | ||
342 | y_vphase = skl_scaler_calc_phase(1, vscale, false); | ||
343 | |||
344 | /* MPEG-2 chroma siting convention */ | ||
345 | uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true); | ||
346 | uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false); | ||
347 | } else { | ||
348 | /* not used */ | ||
349 | y_hphase = 0; | ||
350 | y_vphase = 0; | ||
351 | |||
352 | uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false); | ||
353 | uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false); | ||
354 | } | ||
355 | |||
356 | I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id), | ||
357 | PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode); | ||
358 | I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id), | ||
359 | PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase)); | ||
360 | I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id), | ||
361 | PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase)); | ||
362 | I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y); | ||
363 | I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h); | ||
364 | } | ||
365 | |||
366 | /* Preoffset values for YUV to RGB Conversion */ | ||
367 | #define PREOFF_YUV_TO_RGB_HI 0x1800 | ||
368 | #define PREOFF_YUV_TO_RGB_ME 0x1F00 | ||
369 | #define PREOFF_YUV_TO_RGB_LO 0x1800 | ||
370 | |||
371 | #define ROFF(x) (((x) & 0xffff) << 16) | ||
372 | #define GOFF(x) (((x) & 0xffff) << 0) | ||
373 | #define BOFF(x) (((x) & 0xffff) << 16) | ||
374 | |||
375 | static void | ||
376 | icl_program_input_csc_coeff(const struct intel_crtc_state *crtc_state, | ||
377 | const struct intel_plane_state *plane_state) | ||
378 | { | ||
379 | struct drm_i915_private *dev_priv = | ||
380 | to_i915(plane_state->base.plane->dev); | ||
381 | struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); | ||
382 | enum pipe pipe = crtc->pipe; | ||
383 | struct intel_plane *plane = to_intel_plane(plane_state->base.plane); | ||
384 | enum plane_id plane_id = plane->id; | ||
385 | |||
386 | static const u16 input_csc_matrix[][9] = { | ||
387 | /* | ||
388 | * BT.601 full range YCbCr -> full range RGB | ||
389 | * The matrix required is: | ||
390 | * [1.000, 0.000, 1.371, | ||
391 | * 1.000, -0.336, -0.698, | ||
392 | * 1.000, 1.732, 0.0000] | ||
393 | */ | ||
394 | [DRM_COLOR_YCBCR_BT601] = { | ||
395 | 0x7AF8, 0x7800, 0x0, | ||
396 | 0x8B28, 0x7800, 0x9AC0, | ||
397 | 0x0, 0x7800, 0x7DD8, | ||
398 | }, | ||
399 | /* | ||
400 | * BT.709 full range YCbCr -> full range RGB | ||
401 | * The matrix required is: | ||
402 | * [1.000, 0.000, 1.574, | ||
403 | * 1.000, -0.187, -0.468, | ||
404 | * 1.000, 1.855, 0.0000] | ||
405 | */ | ||
406 | [DRM_COLOR_YCBCR_BT709] = { | ||
407 | 0x7C98, 0x7800, 0x0, | ||
408 | 0x9EF8, 0x7800, 0xABF8, | ||
409 | 0x0, 0x7800, 0x7ED8, | ||
410 | }, | ||
411 | }; | ||
412 | |||
413 | /* Matrix for Limited Range to Full Range Conversion */ | ||
414 | static const u16 input_csc_matrix_lr[][9] = { | ||
415 | /* | ||
416 | * BT.601 Limited range YCbCr -> full range RGB | ||
417 | * The matrix required is: | ||
418 | * [1.164384, 0.000, 1.596370, | ||
419 | * 1.138393, -0.382500, -0.794598, | ||
420 | * 1.138393, 1.971696, 0.0000] | ||
421 | */ | ||
422 | [DRM_COLOR_YCBCR_BT601] = { | ||
423 | 0x7CC8, 0x7950, 0x0, | ||
424 | 0x8CB8, 0x7918, 0x9C40, | ||
425 | 0x0, 0x7918, 0x7FC8, | ||
426 | }, | ||
427 | /* | ||
428 | * BT.709 Limited range YCbCr -> full range RGB | ||
429 | * The matrix required is: | ||
430 | * [1.164, 0.000, 1.833671, | ||
431 | * 1.138393, -0.213249, -0.532909, | ||
432 | * 1.138393, 2.112402, 0.0000] | ||
433 | */ | ||
434 | [DRM_COLOR_YCBCR_BT709] = { | ||
435 | 0x7EA8, 0x7950, 0x0, | ||
436 | 0x8888, 0x7918, 0xADA8, | ||
437 | 0x0, 0x7918, 0x6870, | ||
438 | }, | ||
439 | }; | ||
440 | const u16 *csc; | ||
441 | |||
442 | if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE) | ||
443 | csc = input_csc_matrix[plane_state->base.color_encoding]; | ||
444 | else | ||
445 | csc = input_csc_matrix_lr[plane_state->base.color_encoding]; | ||
446 | |||
447 | I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) | | ||
448 | GOFF(csc[1])); | ||
449 | I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), BOFF(csc[2])); | ||
450 | I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), ROFF(csc[3]) | | ||
451 | GOFF(csc[4])); | ||
452 | I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), BOFF(csc[5])); | ||
453 | I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), ROFF(csc[6]) | | ||
454 | GOFF(csc[7])); | ||
455 | I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), BOFF(csc[8])); | ||
456 | |||
457 | I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0), | ||
458 | PREOFF_YUV_TO_RGB_HI); | ||
459 | I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), | ||
460 | PREOFF_YUV_TO_RGB_ME); | ||
461 | I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2), | ||
462 | PREOFF_YUV_TO_RGB_LO); | ||
463 | I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0); | ||
464 | I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0); | ||
465 | I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0); | ||
466 | } | ||
467 | |||
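The limited-range tables encode the familiar studio-swing expansion: 8-bit video keeps Y in [16,235] and Cb/Cr in [16,240], which is where the constants in the comments come from — 255/219 ≈ 1.164384 for luma, 255/224 ≈ 1.138393 for chroma, and 1.138393 × 1.402 ≈ 1.596 for the Cr→R term (matching the comment up to rounding). A plain-integer reference conversion for one BT.601 limited-range pixel, using the textbook coefficients rather than the hardware's register encoding:

/* BT.601 limited-range YCbCr -> full-range RGB, coefficients scaled by 2^16;
 * clamping of the results to [0, 255] is omitted for brevity */
static void bt601_limited_to_rgb(int y, int cb, int cr, int *r, int *g, int *b)
{
	int yc = y - 16, cbc = cb - 128, crc = cr - 128;	/* remove offsets */

	*r = (76309 * yc + 104597 * crc) / 65536;
	*g = (76309 * yc -  25675 * cbc -  53279 * crc) / 65536;
	*b = (76309 * yc + 132201 * cbc) / 65536;
}

For example, peak white (235, 128, 128) maps to 76309 * 219 / 65536 = 255 on all three channels.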
468 | static void | ||
469 | skl_program_plane(struct intel_plane *plane, | ||
470 | const struct intel_crtc_state *crtc_state, | ||
471 | const struct intel_plane_state *plane_state, | ||
472 | int color_plane, bool slave, u32 plane_ctl) | ||
309 | { | 473 | { |
310 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | 474 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
311 | const struct drm_framebuffer *fb = plane_state->base.fb; | ||
312 | enum plane_id plane_id = plane->id; | 475 | enum plane_id plane_id = plane->id; |
313 | enum pipe pipe = plane->pipe; | 476 | enum pipe pipe = plane->pipe; |
314 | u32 plane_ctl = plane_state->ctl; | ||
315 | const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; | 477 | const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; |
316 | u32 surf_addr = plane_state->color_plane[0].offset; | 478 | u32 surf_addr = plane_state->color_plane[color_plane].offset; |
317 | u32 stride = skl_plane_stride(plane_state, 0); | 479 | u32 stride = skl_plane_stride(plane_state, color_plane); |
318 | u32 aux_stride = skl_plane_stride(plane_state, 1); | 480 | u32 aux_stride = skl_plane_stride(plane_state, 1); |
319 | int crtc_x = plane_state->base.dst.x1; | 481 | int crtc_x = plane_state->base.dst.x1; |
320 | int crtc_y = plane_state->base.dst.y1; | 482 | int crtc_y = plane_state->base.dst.y1; |
321 | uint32_t crtc_w = drm_rect_width(&plane_state->base.dst); | 483 | uint32_t x = plane_state->color_plane[color_plane].x; |
322 | uint32_t crtc_h = drm_rect_height(&plane_state->base.dst); | 484 | uint32_t y = plane_state->color_plane[color_plane].y; |
323 | uint32_t x = plane_state->color_plane[0].x; | ||
324 | uint32_t y = plane_state->color_plane[0].y; | ||
325 | uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; | 485 | uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16; |
326 | uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; | 486 | uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; |
487 | struct intel_plane *linked = plane_state->linked_plane; | ||
488 | const struct drm_framebuffer *fb = plane_state->base.fb; | ||
489 | u8 alpha = plane_state->base.alpha >> 8; | ||
327 | unsigned long irqflags; | 490 | unsigned long irqflags; |
491 | u32 keymsk, keymax; | ||
328 | 492 | ||
329 | /* Sizes are 0 based */ | 493 | /* Sizes are 0 based */ |
330 | src_w--; | 494 | src_w--; |
331 | src_h--; | 495 | src_h--; |
332 | crtc_w--; | 496 | |
333 | crtc_h--; | 497 | keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); |
498 | |||
499 | keymsk = key->channel_mask & 0x3ffffff; | ||
500 | if (alpha < 0xff) | ||
501 | keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; | ||
502 | |||
503 | /* The scaler will handle the output position */ | ||
504 | if (plane_state->scaler_id >= 0) { | ||
505 | crtc_x = 0; | ||
506 | crtc_y = 0; | ||
507 | } | ||
334 | 508 | ||
335 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 509 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
336 | 510 | ||
@@ -338,71 +512,83 @@ skl_update_plane(struct intel_plane *plane, | |||
338 | I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), | 512 | I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), |
339 | plane_state->color_ctl); | 513 | plane_state->color_ctl); |
340 | 514 | ||
341 | if (key->flags) { | 515 | if (fb->format->is_yuv && icl_is_hdr_plane(plane)) |
342 | I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value); | 516 | icl_program_input_csc_coeff(crtc_state, plane_state); |
343 | I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), key->max_value); | 517 | |
344 | I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), key->channel_mask); | 518 | I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value); |
345 | } | 519 | I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax); |
520 | I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk); | ||
346 | 521 | ||
347 | I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x); | 522 | I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x); |
348 | I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride); | 523 | I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride); |
349 | I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w); | 524 | I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w); |
350 | I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), | 525 | I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), |
351 | (plane_state->color_plane[1].offset - surf_addr) | aux_stride); | 526 | (plane_state->color_plane[1].offset - surf_addr) | aux_stride); |
352 | I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id), | ||
353 | (plane_state->color_plane[1].y << 16) | | ||
354 | plane_state->color_plane[1].x); | ||
355 | 527 | ||
356 | /* program plane scaler */ | 528 | if (INTEL_GEN(dev_priv) < 11) |
357 | if (plane_state->scaler_id >= 0) { | 529 | I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id), |
358 | int scaler_id = plane_state->scaler_id; | 530 | (plane_state->color_plane[1].y << 16) | |
359 | const struct intel_scaler *scaler = | 531 | plane_state->color_plane[1].x); |
360 | &crtc_state->scaler_state.scalers[scaler_id]; | 532 | |
361 | u16 y_hphase, uv_rgb_hphase; | 533 | if (icl_is_hdr_plane(plane)) { |
362 | u16 y_vphase, uv_rgb_vphase; | 534 | u32 cus_ctl = 0; |
363 | 535 | ||
364 | /* TODO: handle sub-pixel coordinates */ | 536 | if (linked) { |
365 | if (fb->format->format == DRM_FORMAT_NV12) { | 537 | /* Enable and use MPEG-2 chroma siting */ |
366 | y_hphase = skl_scaler_calc_phase(1, false); | 538 | cus_ctl = PLANE_CUS_ENABLE | |
367 | y_vphase = skl_scaler_calc_phase(1, false); | 539 | PLANE_CUS_HPHASE_0 | |
368 | 540 | PLANE_CUS_VPHASE_SIGN_NEGATIVE | | |
369 | /* MPEG2 chroma siting convention */ | 541 | PLANE_CUS_VPHASE_0_25; |
370 | uv_rgb_hphase = skl_scaler_calc_phase(2, true); | 542 | |
371 | uv_rgb_vphase = skl_scaler_calc_phase(2, false); | 543 | if (linked->id == PLANE_SPRITE5) |
372 | } else { | 544 | cus_ctl |= PLANE_CUS_PLANE_7; |
373 | /* not used */ | 545 | else if (linked->id == PLANE_SPRITE4) |
374 | y_hphase = 0; | 546 | cus_ctl |= PLANE_CUS_PLANE_6; |
375 | y_vphase = 0; | 547 | else |
376 | 548 | MISSING_CASE(linked->id); | |
377 | uv_rgb_hphase = skl_scaler_calc_phase(1, false); | ||
378 | uv_rgb_vphase = skl_scaler_calc_phase(1, false); | ||
379 | } | 549 | } |
380 | 550 | ||
381 | I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id), | 551 | I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl); |
382 | PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode); | ||
383 | I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0); | ||
384 | I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id), | ||
385 | PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase)); | ||
386 | I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id), | ||
387 | PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase)); | ||
388 | I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y); | ||
389 | I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), | ||
390 | ((crtc_w + 1) << 16)|(crtc_h + 1)); | ||
391 | |||
392 | I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0); | ||
393 | } else { | ||
394 | I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x); | ||
395 | } | 552 | } |
396 | 553 | ||
554 | if (!slave && plane_state->scaler_id >= 0) | ||
555 | skl_program_scaler(plane, crtc_state, plane_state); | ||
556 | |||
557 | I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x); | ||
558 | |||
397 | I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl); | 559 | I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl); |
398 | I915_WRITE_FW(PLANE_SURF(pipe, plane_id), | 560 | I915_WRITE_FW(PLANE_SURF(pipe, plane_id), |
399 | intel_plane_ggtt_offset(plane_state) + surf_addr); | 561 | intel_plane_ggtt_offset(plane_state) + surf_addr); |
400 | POSTING_READ_FW(PLANE_SURF(pipe, plane_id)); | ||
401 | 562 | ||
402 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 563 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
403 | } | 564 | } |
404 | 565 | ||
405 | void | 566 | static void |
567 | skl_update_plane(struct intel_plane *plane, | ||
568 | const struct intel_crtc_state *crtc_state, | ||
569 | const struct intel_plane_state *plane_state) | ||
570 | { | ||
571 | int color_plane = 0; | ||
572 | |||
573 | if (plane_state->linked_plane) { | ||
574 | /* Program the UV plane */ | ||
575 | color_plane = 1; | ||
576 | } | ||
577 | |||
578 | skl_program_plane(plane, crtc_state, plane_state, | ||
579 | color_plane, false, plane_state->ctl); | ||
580 | } | ||
581 | |||
582 | static void | ||
583 | icl_update_slave(struct intel_plane *plane, | ||
584 | const struct intel_crtc_state *crtc_state, | ||
585 | const struct intel_plane_state *plane_state) | ||
586 | { | ||
587 | skl_program_plane(plane, crtc_state, plane_state, 0, true, | ||
588 | plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE); | ||
589 | } | ||
590 | |||
591 | static void | ||
406 | skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) | 592 | skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) |
407 | { | 593 | { |
408 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | 594 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
@@ -413,14 +599,12 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) | |||
413 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 599 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
414 | 600 | ||
415 | I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0); | 601 | I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0); |
416 | |||
417 | I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0); | 602 | I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0); |
418 | POSTING_READ_FW(PLANE_SURF(pipe, plane_id)); | ||
419 | 603 | ||
420 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 604 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
421 | } | 605 | } |
422 | 606 | ||
423 | bool | 607 | static bool |
424 | skl_plane_get_hw_state(struct intel_plane *plane, | 608 | skl_plane_get_hw_state(struct intel_plane *plane, |
425 | enum pipe *pipe) | 609 | enum pipe *pipe) |
426 | { | 610 | { |
@@ -613,7 +797,6 @@ vlv_update_plane(struct intel_plane *plane, | |||
613 | const struct intel_plane_state *plane_state) | 797 | const struct intel_plane_state *plane_state) |
614 | { | 798 | { |
615 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | 799 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
616 | const struct drm_framebuffer *fb = plane_state->base.fb; | ||
617 | enum pipe pipe = plane->pipe; | 800 | enum pipe pipe = plane->pipe; |
618 | enum plane_id plane_id = plane->id; | 801 | enum plane_id plane_id = plane->id; |
619 | u32 sprctl = plane_state->ctl; | 802 | u32 sprctl = plane_state->ctl; |
@@ -650,10 +833,8 @@ vlv_update_plane(struct intel_plane *plane, | |||
650 | plane_state->color_plane[0].stride); | 833 | plane_state->color_plane[0].stride); |
651 | I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x); | 834 | I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x); |
652 | 835 | ||
653 | if (fb->modifier == I915_FORMAT_MOD_X_TILED) | 836 | I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x); |
654 | I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x); | 837 | I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset); |
655 | else | ||
656 | I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset); | ||
657 | 838 | ||
658 | I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0); | 839 | I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0); |
659 | 840 | ||
@@ -661,7 +842,6 @@ vlv_update_plane(struct intel_plane *plane, | |||
661 | I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl); | 842 | I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl); |
662 | I915_WRITE_FW(SPSURF(pipe, plane_id), | 843 | I915_WRITE_FW(SPSURF(pipe, plane_id), |
663 | intel_plane_ggtt_offset(plane_state) + sprsurf_offset); | 844 | intel_plane_ggtt_offset(plane_state) + sprsurf_offset); |
664 | POSTING_READ_FW(SPSURF(pipe, plane_id)); | ||
665 | 845 | ||
666 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 846 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
667 | } | 847 | } |
@@ -677,9 +857,7 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) | |||
677 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); | 857 | spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); |
678 | 858 | ||
679 | I915_WRITE_FW(SPCNTR(pipe, plane_id), 0); | 859 | I915_WRITE_FW(SPCNTR(pipe, plane_id), 0); |
680 | |||
681 | I915_WRITE_FW(SPSURF(pipe, plane_id), 0); | 860 | I915_WRITE_FW(SPSURF(pipe, plane_id), 0); |
682 | POSTING_READ_FW(SPSURF(pipe, plane_id)); | ||
683 | 861 | ||
684 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 862 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
685 | } | 863 | } |
@@ -774,7 +952,6 @@ ivb_update_plane(struct intel_plane *plane, | |||
774 | const struct intel_plane_state *plane_state) | 952 | const struct intel_plane_state *plane_state) |
775 | { | 953 | { |
776 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | 954 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
777 | const struct drm_framebuffer *fb = plane_state->base.fb; | ||
778 | enum pipe pipe = plane->pipe; | 955 | enum pipe pipe = plane->pipe; |
779 | u32 sprctl = plane_state->ctl, sprscale = 0; | 956 | u32 sprctl = plane_state->ctl, sprscale = 0; |
780 | u32 sprsurf_offset = plane_state->color_plane[0].offset; | 957 | u32 sprsurf_offset = plane_state->color_plane[0].offset; |
@@ -814,12 +991,12 @@ ivb_update_plane(struct intel_plane *plane, | |||
814 | 991 | ||
815 | /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET | 992 | /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET |
816 | * register */ | 993 | * register */ |
817 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) | 994 | if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
818 | I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x); | 995 | I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x); |
819 | else if (fb->modifier == I915_FORMAT_MOD_X_TILED) | 996 | } else { |
820 | I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x); | 997 | I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x); |
821 | else | ||
822 | I915_WRITE_FW(SPRLINOFF(pipe), linear_offset); | 998 | I915_WRITE_FW(SPRLINOFF(pipe), linear_offset); |
999 | } | ||
823 | 1000 | ||
824 | I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); | 1001 | I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); |
825 | if (IS_IVYBRIDGE(dev_priv)) | 1002 | if (IS_IVYBRIDGE(dev_priv)) |
@@ -827,7 +1004,6 @@ ivb_update_plane(struct intel_plane *plane, | |||
827 | I915_WRITE_FW(SPRCTL(pipe), sprctl); | 1004 | I915_WRITE_FW(SPRCTL(pipe), sprctl); |
828 | I915_WRITE_FW(SPRSURF(pipe), | 1005 | I915_WRITE_FW(SPRSURF(pipe), |
829 | intel_plane_ggtt_offset(plane_state) + sprsurf_offset); | 1006 | intel_plane_ggtt_offset(plane_state) + sprsurf_offset); |
830 | POSTING_READ_FW(SPRSURF(pipe)); | ||
831 | 1007 | ||
832 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 1008 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
833 | } | 1009 | } |
@@ -845,9 +1021,7 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) | |||
845 | /* Can't leave the scaler enabled... */ | 1021 | /* Can't leave the scaler enabled... */ |
846 | if (IS_IVYBRIDGE(dev_priv)) | 1022 | if (IS_IVYBRIDGE(dev_priv)) |
847 | I915_WRITE_FW(SPRSCALE(pipe), 0); | 1023 | I915_WRITE_FW(SPRSCALE(pipe), 0); |
848 | |||
849 | I915_WRITE_FW(SPRSURF(pipe), 0); | 1024 | I915_WRITE_FW(SPRSURF(pipe), 0); |
850 | POSTING_READ_FW(SPRSURF(pipe)); | ||
851 | 1025 | ||
852 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 1026 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
853 | } | 1027 | } |
@@ -946,7 +1120,6 @@ g4x_update_plane(struct intel_plane *plane, | |||
946 | const struct intel_plane_state *plane_state) | 1120 | const struct intel_plane_state *plane_state) |
947 | { | 1121 | { |
948 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | 1122 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
949 | const struct drm_framebuffer *fb = plane_state->base.fb; | ||
950 | enum pipe pipe = plane->pipe; | 1123 | enum pipe pipe = plane->pipe; |
951 | u32 dvscntr = plane_state->ctl, dvsscale = 0; | 1124 | u32 dvscntr = plane_state->ctl, dvsscale = 0; |
952 | u32 dvssurf_offset = plane_state->color_plane[0].offset; | 1125 | u32 dvssurf_offset = plane_state->color_plane[0].offset; |
@@ -984,17 +1157,14 @@ g4x_update_plane(struct intel_plane *plane, | |||
984 | I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride); | 1157 | I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride); |
985 | I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x); | 1158 | I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x); |
986 | 1159 | ||
987 | if (fb->modifier == I915_FORMAT_MOD_X_TILED) | 1160 | I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x); |
988 | I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x); | 1161 | I915_WRITE_FW(DVSLINOFF(pipe), linear_offset); |
989 | else | ||
990 | I915_WRITE_FW(DVSLINOFF(pipe), linear_offset); | ||
991 | 1162 | ||
992 | I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); | 1163 | I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); |
993 | I915_WRITE_FW(DVSSCALE(pipe), dvsscale); | 1164 | I915_WRITE_FW(DVSSCALE(pipe), dvsscale); |
994 | I915_WRITE_FW(DVSCNTR(pipe), dvscntr); | 1165 | I915_WRITE_FW(DVSCNTR(pipe), dvscntr); |
995 | I915_WRITE_FW(DVSSURF(pipe), | 1166 | I915_WRITE_FW(DVSSURF(pipe), |
996 | intel_plane_ggtt_offset(plane_state) + dvssurf_offset); | 1167 | intel_plane_ggtt_offset(plane_state) + dvssurf_offset); |
997 | POSTING_READ_FW(DVSSURF(pipe)); | ||
998 | 1168 | ||
999 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 1169 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
1000 | } | 1170 | } |
@@ -1011,9 +1181,7 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) | |||
1011 | I915_WRITE_FW(DVSCNTR(pipe), 0); | 1181 | I915_WRITE_FW(DVSCNTR(pipe), 0); |
1012 | /* Disable the scaler */ | 1182 | /* Disable the scaler */ |
1013 | I915_WRITE_FW(DVSSCALE(pipe), 0); | 1183 | I915_WRITE_FW(DVSSCALE(pipe), 0); |
1014 | |||
1015 | I915_WRITE_FW(DVSSURF(pipe), 0); | 1184 | I915_WRITE_FW(DVSSURF(pipe), 0); |
1016 | POSTING_READ_FW(DVSSURF(pipe)); | ||
1017 | 1185 | ||
1018 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 1186 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
1019 | } | 1187 | } |
@@ -1039,6 +1207,19 @@ g4x_plane_get_hw_state(struct intel_plane *plane, | |||
1039 | return ret; | 1207 | return ret; |
1040 | } | 1208 | } |
1041 | 1209 | ||
1210 | static bool intel_fb_scalable(const struct drm_framebuffer *fb) | ||
1211 | { | ||
1212 | if (!fb) | ||
1213 | return false; | ||
1214 | |||
1215 | switch (fb->format->format) { | ||
1216 | case DRM_FORMAT_C8: | ||
1217 | return false; | ||
1218 | default: | ||
1219 | return true; | ||
1220 | } | ||
1221 | } | ||
1222 | |||
1042 | static int | 1223 | static int |
1043 | g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, | 1224 | g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, |
1044 | struct intel_plane_state *plane_state) | 1225 | struct intel_plane_state *plane_state) |
@@ -1106,18 +1287,18 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state, | |||
1106 | { | 1287 | { |
1107 | struct intel_plane *plane = to_intel_plane(plane_state->base.plane); | 1288 | struct intel_plane *plane = to_intel_plane(plane_state->base.plane); |
1108 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | 1289 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
1109 | int max_scale, min_scale; | 1290 | int min_scale = DRM_PLANE_HELPER_NO_SCALING; |
1291 | int max_scale = DRM_PLANE_HELPER_NO_SCALING; | ||
1110 | int ret; | 1292 | int ret; |
1111 | 1293 | ||
1112 | if (INTEL_GEN(dev_priv) < 7) { | 1294 | if (intel_fb_scalable(plane_state->base.fb)) { |
1113 | min_scale = 1; | 1295 | if (INTEL_GEN(dev_priv) < 7) { |
1114 | max_scale = 16 << 16; | 1296 | min_scale = 1; |
1115 | } else if (IS_IVYBRIDGE(dev_priv)) { | 1297 | max_scale = 16 << 16; |
1116 | min_scale = 1; | 1298 | } else if (IS_IVYBRIDGE(dev_priv)) { |
1117 | max_scale = 2 << 16; | 1299 | min_scale = 1; |
1118 | } else { | 1300 | max_scale = 2 << 16; |
1119 | min_scale = DRM_PLANE_HELPER_NO_SCALING; | 1301 | } |
1120 | max_scale = DRM_PLANE_HELPER_NO_SCALING; | ||
1121 | } | 1302 | } |
1122 | 1303 | ||
1123 | ret = drm_atomic_helper_check_plane_state(&plane_state->base, | 1304 | ret = drm_atomic_helper_check_plane_state(&plane_state->base, |
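The scale limits are source/destination ratios in .16 fixed point: 16 << 16 permits up to 16x downscaling on pre-gen7, 2 << 16 up to 2x on IVB, and DRM_PLANE_HELPER_NO_SCALING (1 << 16, i.e. exactly 1.0) forbids scaling — now also the default for non-scalable framebuffers such as C8. A quick worked example of the ratio drm_atomic_helper_check_plane_state() tests against those bounds:

#include <assert.h>

int main(void)
{
	int src_w = 1920 << 16;		/* source width in 16.16 fixed point */
	int dst_w = 960;		/* destination width in whole pixels */
	int hscale = src_w / dst_w;	/* 2.0 in 16.16: a 2x downscale */

	assert(hscale == 2 << 16);
	assert(hscale <= 16 << 16);	/* within the pre-gen7 limit */
	assert(hscale > 1 << 16);	/* rejected when scaling is forbidden */
	return 0;
}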
@@ -1204,6 +1385,8 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state, | |||
1204 | static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, | 1385 | static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, |
1205 | const struct intel_plane_state *plane_state) | 1386 | const struct intel_plane_state *plane_state) |
1206 | { | 1387 | { |
1388 | struct intel_plane *plane = to_intel_plane(plane_state->base.plane); | ||
1389 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | ||
1207 | const struct drm_framebuffer *fb = plane_state->base.fb; | 1390 | const struct drm_framebuffer *fb = plane_state->base.fb; |
1208 | unsigned int rotation = plane_state->base.rotation; | 1391 | unsigned int rotation = plane_state->base.rotation; |
1209 | struct drm_format_name_buf format_name; | 1392 | struct drm_format_name_buf format_name; |
@@ -1232,13 +1415,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, | |||
1232 | } | 1415 | } |
1233 | 1416 | ||
1234 | /* | 1417 | /* |
1235 | * 90/270 is not allowed with RGB64 16:16:16:16, | 1418 | * 90/270 is not allowed with RGB64 16:16:16:16 and |
1236 | * RGB 16-bit 5:6:5, and Indexed 8-bit. | 1419 | * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed from gen11 onwards. |
1237 | * TBD: Add RGB64 case once it's added in supported format | 1420 | * TBD: Add RGB64 case once it's added in supported format |
1421 | * list. | ||
1238 | */ | 1422 | */ |
1239 | switch (fb->format->format) { | 1423 | switch (fb->format->format) { |
1240 | case DRM_FORMAT_C8: | ||
1241 | case DRM_FORMAT_RGB565: | 1424 | case DRM_FORMAT_RGB565: |
1425 | if (INTEL_GEN(dev_priv) >= 11) | ||
1426 | break; | ||
1427 | /* fall through */ | ||
1428 | case DRM_FORMAT_C8: | ||
1242 | DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", | 1429 | DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", |
1243 | drm_get_format_name(fb->format->format, | 1430 | drm_get_format_name(fb->format->format, |
1244 | &format_name)); | 1431 | &format_name)); |
@@ -1292,12 +1479,31 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s | |||
1292 | return 0; | 1479 | return 0; |
1293 | } | 1480 | } |
1294 | 1481 | ||
1295 | int skl_plane_check(struct intel_crtc_state *crtc_state, | 1482 | static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state) |
1296 | struct intel_plane_state *plane_state) | 1483 | { |
1484 | const struct drm_framebuffer *fb = plane_state->base.fb; | ||
1485 | unsigned int rotation = plane_state->base.rotation; | ||
1486 | int src_w = drm_rect_width(&plane_state->base.src) >> 16; | ||
1487 | |||
1488 | /* Display WA #1106 */ | ||
1489 | if (fb->format->format == DRM_FORMAT_NV12 && src_w & 3 && | ||
1490 | (rotation == DRM_MODE_ROTATE_270 || | ||
1491 | rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) { | ||
1492 | DRM_DEBUG_KMS("src width must be a multiple of 4 for rotated NV12\n"); | ||
1493 | return -EINVAL; | ||
1494 | } | ||
1495 | |||
1496 | return 0; | ||
1497 | } | ||
1498 | |||
1499 | static int skl_plane_check(struct intel_crtc_state *crtc_state, | ||
1500 | struct intel_plane_state *plane_state) | ||
1297 | { | 1501 | { |
1298 | struct intel_plane *plane = to_intel_plane(plane_state->base.plane); | 1502 | struct intel_plane *plane = to_intel_plane(plane_state->base.plane); |
1299 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); | 1503 | struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
1300 | int max_scale, min_scale; | 1504 | const struct drm_framebuffer *fb = plane_state->base.fb; |
1505 | int min_scale = DRM_PLANE_HELPER_NO_SCALING; | ||
1506 | int max_scale = DRM_PLANE_HELPER_NO_SCALING; | ||
1301 | int ret; | 1507 | int ret; |
1302 | 1508 | ||
1303 | ret = skl_plane_check_fb(crtc_state, plane_state); | 1509 | ret = skl_plane_check_fb(crtc_state, plane_state); |
@@ -1305,15 +1511,9 @@ int skl_plane_check(struct intel_crtc_state *crtc_state, | |||
1305 | return ret; | 1511 | return ret; |
1306 | 1512 | ||
1307 | /* use scaler when colorkey is not required */ | 1513 | /* use scaler when colorkey is not required */ |
1308 | if (!plane_state->ckey.flags) { | 1514 | if (!plane_state->ckey.flags && intel_fb_scalable(fb)) { |
1309 | const struct drm_framebuffer *fb = plane_state->base.fb; | ||
1310 | |||
1311 | min_scale = 1; | 1515 | min_scale = 1; |
1312 | max_scale = skl_max_scale(crtc_state, | 1516 | max_scale = skl_max_scale(crtc_state, fb->format->format); |
1313 | fb ? fb->format->format : 0); | ||
1314 | } else { | ||
1315 | min_scale = DRM_PLANE_HELPER_NO_SCALING; | ||
1316 | max_scale = DRM_PLANE_HELPER_NO_SCALING; | ||
1317 | } | 1517 | } |
1318 | 1518 | ||
1319 | ret = drm_atomic_helper_check_plane_state(&plane_state->base, | 1519 | ret = drm_atomic_helper_check_plane_state(&plane_state->base, |
@@ -1334,10 +1534,18 @@ int skl_plane_check(struct intel_crtc_state *crtc_state, | |||
1334 | if (ret) | 1534 | if (ret) |
1335 | return ret; | 1535 | return ret; |
1336 | 1536 | ||
1537 | ret = skl_plane_check_nv12_rotation(plane_state); | ||
1538 | if (ret) | ||
1539 | return ret; | ||
1540 | |||
1337 | ret = skl_check_plane_surface(plane_state); | 1541 | ret = skl_check_plane_surface(plane_state); |
1338 | if (ret) | 1542 | if (ret) |
1339 | return ret; | 1543 | return ret; |
1340 | 1544 | ||
1545 | /* HW only has 8 bits of pixel precision, disable plane if invisible */ | ||
1546 | if (!(plane_state->base.alpha >> 8)) | ||
1547 | plane_state->base.visible = false; | ||
1548 | |||
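The new visibility check works because the DRM alpha property is 16 bits wide while the hardware honours only the top 8: any alpha of 0x00ff or below truncates to zero after the shift, so the plane would scan out fully transparent and can simply be treated as invisible. Illustratively:

/* 16-bit property alpha vs. the hardware's 8 bits of precision */
static bool plane_alpha_visible(unsigned short alpha16)
{
	unsigned char alpha8 = alpha16 >> 8;	/* what the hardware sees */

	return alpha8 != 0;	/* 0x0000..0x00ff all end up invisible */
}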
1341 | plane_state->ctl = skl_plane_ctl(crtc_state, plane_state); | 1549 | plane_state->ctl = skl_plane_ctl(crtc_state, plane_state); |
1342 | 1550 | ||
1343 | if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) | 1551 | if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) |
@@ -1502,24 +1710,30 @@ static const uint32_t vlv_plane_formats[] = { | |||
1502 | DRM_FORMAT_VYUY, | 1710 | DRM_FORMAT_VYUY, |
1503 | }; | 1711 | }; |
1504 | 1712 | ||
1505 | static uint32_t skl_plane_formats[] = { | 1713 | static const uint32_t skl_plane_formats[] = { |
1714 | DRM_FORMAT_C8, | ||
1506 | DRM_FORMAT_RGB565, | 1715 | DRM_FORMAT_RGB565, |
1507 | DRM_FORMAT_ABGR8888, | ||
1508 | DRM_FORMAT_ARGB8888, | ||
1509 | DRM_FORMAT_XBGR8888, | ||
1510 | DRM_FORMAT_XRGB8888, | 1716 | DRM_FORMAT_XRGB8888, |
1717 | DRM_FORMAT_XBGR8888, | ||
1718 | DRM_FORMAT_ARGB8888, | ||
1719 | DRM_FORMAT_ABGR8888, | ||
1720 | DRM_FORMAT_XRGB2101010, | ||
1721 | DRM_FORMAT_XBGR2101010, | ||
1511 | DRM_FORMAT_YUYV, | 1722 | DRM_FORMAT_YUYV, |
1512 | DRM_FORMAT_YVYU, | 1723 | DRM_FORMAT_YVYU, |
1513 | DRM_FORMAT_UYVY, | 1724 | DRM_FORMAT_UYVY, |
1514 | DRM_FORMAT_VYUY, | 1725 | DRM_FORMAT_VYUY, |
1515 | }; | 1726 | }; |
1516 | 1727 | ||
1517 | static uint32_t skl_planar_formats[] = { | 1728 | static const uint32_t skl_planar_formats[] = { |
1729 | DRM_FORMAT_C8, | ||
1518 | DRM_FORMAT_RGB565, | 1730 | DRM_FORMAT_RGB565, |
1519 | DRM_FORMAT_ABGR8888, | ||
1520 | DRM_FORMAT_ARGB8888, | ||
1521 | DRM_FORMAT_XBGR8888, | ||
1522 | DRM_FORMAT_XRGB8888, | 1731 | DRM_FORMAT_XRGB8888, |
1732 | DRM_FORMAT_XBGR8888, | ||
1733 | DRM_FORMAT_ARGB8888, | ||
1734 | DRM_FORMAT_ABGR8888, | ||
1735 | DRM_FORMAT_XRGB2101010, | ||
1736 | DRM_FORMAT_XBGR2101010, | ||
1523 | DRM_FORMAT_YUYV, | 1737 | DRM_FORMAT_YUYV, |
1524 | DRM_FORMAT_YVYU, | 1738 | DRM_FORMAT_YVYU, |
1525 | DRM_FORMAT_UYVY, | 1739 | DRM_FORMAT_UYVY, |
@@ -1724,8 +1938,36 @@ static const struct drm_plane_funcs skl_plane_funcs = { | |||
1724 | .format_mod_supported = skl_plane_format_mod_supported, | 1938 | .format_mod_supported = skl_plane_format_mod_supported, |
1725 | }; | 1939 | }; |
1726 | 1940 | ||
1727 | bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, | 1941 | static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, |
1728 | enum pipe pipe, enum plane_id plane_id) | 1942 | enum pipe pipe, enum plane_id plane_id) |
1943 | { | ||
1944 | if (!HAS_FBC(dev_priv)) | ||
1945 | return false; | ||
1946 | |||
1947 | return pipe == PIPE_A && plane_id == PLANE_PRIMARY; | ||
1948 | } | ||
1949 | |||
1950 | static bool skl_plane_has_planar(struct drm_i915_private *dev_priv, | ||
1951 | enum pipe pipe, enum plane_id plane_id) | ||
1952 | { | ||
1953 | if (INTEL_GEN(dev_priv) >= 11) | ||
1954 | return plane_id <= PLANE_SPRITE3; | ||
1955 | |||
1956 | /* Display WA #0870: skl, bxt */ | ||
1957 | if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv)) | ||
1958 | return false; | ||
1959 | |||
1960 | if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C) | ||
1961 | return false; | ||
1962 | |||
1963 | if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0) | ||
1964 | return false; | ||
1965 | |||
1966 | return true; | ||
1967 | } | ||
1968 | |||
1969 | static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, | ||
1970 | enum pipe pipe, enum plane_id plane_id) | ||
1729 | { | 1971 | { |
1730 | if (plane_id == PLANE_CURSOR) | 1972 | if (plane_id == PLANE_CURSOR) |
1731 | return false; | 1973 | return false; |
@@ -1742,109 +1984,173 @@ bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, | |||
1742 | } | 1984 | } |
1743 | 1985 | ||
1744 | struct intel_plane * | 1986 | struct intel_plane * |
1745 | intel_sprite_plane_create(struct drm_i915_private *dev_priv, | 1987 | skl_universal_plane_create(struct drm_i915_private *dev_priv, |
1746 | enum pipe pipe, int plane) | 1988 | enum pipe pipe, enum plane_id plane_id) |
1747 | { | 1989 | { |
1748 | struct intel_plane *intel_plane = NULL; | 1990 | struct intel_plane *plane; |
1749 | struct intel_plane_state *state = NULL; | 1991 | enum drm_plane_type plane_type; |
1750 | const struct drm_plane_funcs *plane_funcs; | ||
1751 | unsigned long possible_crtcs; | ||
1752 | const uint32_t *plane_formats; | ||
1753 | const uint64_t *modifiers; | ||
1754 | unsigned int supported_rotations; | 1992 | unsigned int supported_rotations; |
1755 | int num_plane_formats; | 1993 | unsigned int possible_crtcs; |
1994 | const u64 *modifiers; | ||
1995 | const u32 *formats; | ||
1996 | int num_formats; | ||
1756 | int ret; | 1997 | int ret; |
1757 | 1998 | ||
1758 | intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL); | 1999 | plane = intel_plane_alloc(); |
1759 | if (!intel_plane) { | 2000 | if (IS_ERR(plane)) |
1760 | ret = -ENOMEM; | 2001 | return plane; |
1761 | goto fail; | 2002 | |
2003 | plane->pipe = pipe; | ||
2004 | plane->id = plane_id; | ||
2005 | plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id); | ||
2006 | |||
2007 | plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id); | ||
2008 | if (plane->has_fbc) { | ||
2009 | struct intel_fbc *fbc = &dev_priv->fbc; | ||
2010 | |||
2011 | fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; | ||
1762 | } | 2012 | } |
1763 | 2013 | ||
1764 | state = intel_create_plane_state(&intel_plane->base); | 2014 | plane->max_stride = skl_plane_max_stride; |
1765 | if (!state) { | 2015 | plane->update_plane = skl_update_plane; |
1766 | ret = -ENOMEM; | 2016 | plane->disable_plane = skl_disable_plane; |
1767 | goto fail; | 2017 | plane->get_hw_state = skl_plane_get_hw_state; |
2018 | plane->check_plane = skl_plane_check; | ||
2019 | if (icl_is_nv12_y_plane(plane_id)) | ||
2020 | plane->update_slave = icl_update_slave; | ||
2021 | |||
2022 | if (skl_plane_has_planar(dev_priv, pipe, plane_id)) { | ||
2023 | formats = skl_planar_formats; | ||
2024 | num_formats = ARRAY_SIZE(skl_planar_formats); | ||
2025 | } else { | ||
2026 | formats = skl_plane_formats; | ||
2027 | num_formats = ARRAY_SIZE(skl_plane_formats); | ||
1768 | } | 2028 | } |
1769 | intel_plane->base.state = &state->base; | ||
1770 | 2029 | ||
1771 | if (INTEL_GEN(dev_priv) >= 9) { | 2030 | plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id); |
1772 | state->scaler_id = -1; | 2031 | if (plane->has_ccs) |
2032 | modifiers = skl_plane_format_modifiers_ccs; | ||
2033 | else | ||
2034 | modifiers = skl_plane_format_modifiers_noccs; | ||
1773 | 2035 | ||
1774 | intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, | 2036 | if (plane_id == PLANE_PRIMARY) |
1775 | PLANE_SPRITE0 + plane); | 2037 | plane_type = DRM_PLANE_TYPE_PRIMARY; |
2038 | else | ||
2039 | plane_type = DRM_PLANE_TYPE_OVERLAY; | ||
1776 | 2040 | ||
1777 | intel_plane->max_stride = skl_plane_max_stride; | 2041 | possible_crtcs = BIT(pipe); |
1778 | intel_plane->update_plane = skl_update_plane; | ||
1779 | intel_plane->disable_plane = skl_disable_plane; | ||
1780 | intel_plane->get_hw_state = skl_plane_get_hw_state; | ||
1781 | intel_plane->check_plane = skl_plane_check; | ||
1782 | 2042 | ||
1783 | if (skl_plane_has_planar(dev_priv, pipe, | 2043 | ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, |
1784 | PLANE_SPRITE0 + plane)) { | 2044 | possible_crtcs, &skl_plane_funcs, |
1785 | plane_formats = skl_planar_formats; | 2045 | formats, num_formats, modifiers, |
1786 | num_plane_formats = ARRAY_SIZE(skl_planar_formats); | 2046 | plane_type, |
1787 | } else { | 2047 | "plane %d%c", plane_id + 1, |
1788 | plane_formats = skl_plane_formats; | 2048 | pipe_name(pipe)); |
1789 | num_plane_formats = ARRAY_SIZE(skl_plane_formats); | 2049 | if (ret) |
1790 | } | 2050 | goto fail; |
1791 | 2051 | ||
1792 | if (intel_plane->has_ccs) | 2052 | supported_rotations = |
1793 | modifiers = skl_plane_format_modifiers_ccs; | 2053 | DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | |
1794 | else | 2054 | DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; |
1795 | modifiers = skl_plane_format_modifiers_noccs; | 2055 | |
1796 | 2056 | if (INTEL_GEN(dev_priv) >= 10) | |
1797 | plane_funcs = &skl_plane_funcs; | 2057 | supported_rotations |= DRM_MODE_REFLECT_X; |
1798 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | 2058 | |
1799 | intel_plane->max_stride = i9xx_plane_max_stride; | 2059 | drm_plane_create_rotation_property(&plane->base, |
1800 | intel_plane->update_plane = vlv_update_plane; | 2060 | DRM_MODE_ROTATE_0, |
1801 | intel_plane->disable_plane = vlv_disable_plane; | 2061 | supported_rotations); |
1802 | intel_plane->get_hw_state = vlv_plane_get_hw_state; | 2062 | |
1803 | intel_plane->check_plane = vlv_sprite_check; | 2063 | drm_plane_create_color_properties(&plane->base, |
1804 | 2064 | BIT(DRM_COLOR_YCBCR_BT601) | | |
1805 | plane_formats = vlv_plane_formats; | 2065 | BIT(DRM_COLOR_YCBCR_BT709), |
1806 | num_plane_formats = ARRAY_SIZE(vlv_plane_formats); | 2066 | BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | |
2067 | BIT(DRM_COLOR_YCBCR_FULL_RANGE), | ||
2068 | DRM_COLOR_YCBCR_BT709, | ||
2069 | DRM_COLOR_YCBCR_LIMITED_RANGE); | ||
2070 | |||
2071 | drm_plane_create_alpha_property(&plane->base); | ||
2072 | drm_plane_create_blend_mode_property(&plane->base, | ||
2073 | BIT(DRM_MODE_BLEND_PIXEL_NONE) | | ||
2074 | BIT(DRM_MODE_BLEND_PREMULTI) | | ||
2075 | BIT(DRM_MODE_BLEND_COVERAGE)); | ||
2076 | |||
2077 | drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); | ||
2078 | |||
2079 | return plane; | ||
2080 | |||
2081 | fail: | ||
2082 | intel_plane_free(plane); | ||
2083 | |||
2084 | return ERR_PTR(ret); | ||
2085 | } | ||
2086 | |||
2087 | struct intel_plane * | ||
2088 | intel_sprite_plane_create(struct drm_i915_private *dev_priv, | ||
2089 | enum pipe pipe, int sprite) | ||
2090 | { | ||
2091 | struct intel_plane *plane; | ||
2092 | const struct drm_plane_funcs *plane_funcs; | ||
2093 | unsigned long possible_crtcs; | ||
2094 | unsigned int supported_rotations; | ||
2095 | const u64 *modifiers; | ||
2096 | const u32 *formats; | ||
2097 | int num_formats; | ||
2098 | int ret; | ||
2099 | |||
2100 | if (INTEL_GEN(dev_priv) >= 9) | ||
2101 | return skl_universal_plane_create(dev_priv, pipe, | ||
2102 | PLANE_SPRITE0 + sprite); | ||
2103 | |||
2104 | plane = intel_plane_alloc(); | ||
2105 | if (IS_ERR(plane)) | ||
2106 | return plane; | ||
2107 | |||
2108 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | ||
2109 | plane->max_stride = i9xx_plane_max_stride; | ||
2110 | plane->update_plane = vlv_update_plane; | ||
2111 | plane->disable_plane = vlv_disable_plane; | ||
2112 | plane->get_hw_state = vlv_plane_get_hw_state; | ||
2113 | plane->check_plane = vlv_sprite_check; | ||
2114 | |||
2115 | formats = vlv_plane_formats; | ||
2116 | num_formats = ARRAY_SIZE(vlv_plane_formats); | ||
1807 | modifiers = i9xx_plane_format_modifiers; | 2117 | modifiers = i9xx_plane_format_modifiers; |
1808 | 2118 | ||
1809 | plane_funcs = &vlv_sprite_funcs; | 2119 | plane_funcs = &vlv_sprite_funcs; |
1810 | } else if (INTEL_GEN(dev_priv) >= 7) { | 2120 | } else if (INTEL_GEN(dev_priv) >= 7) { |
1811 | intel_plane->max_stride = g4x_sprite_max_stride; | 2121 | plane->max_stride = g4x_sprite_max_stride; |
1812 | intel_plane->update_plane = ivb_update_plane; | 2122 | plane->update_plane = ivb_update_plane; |
1813 | intel_plane->disable_plane = ivb_disable_plane; | 2123 | plane->disable_plane = ivb_disable_plane; |
1814 | intel_plane->get_hw_state = ivb_plane_get_hw_state; | 2124 | plane->get_hw_state = ivb_plane_get_hw_state; |
1815 | intel_plane->check_plane = g4x_sprite_check; | 2125 | plane->check_plane = g4x_sprite_check; |
1816 | 2126 | ||
1817 | plane_formats = snb_plane_formats; | 2127 | formats = snb_plane_formats; |
1818 | num_plane_formats = ARRAY_SIZE(snb_plane_formats); | 2128 | num_formats = ARRAY_SIZE(snb_plane_formats); |
1819 | modifiers = i9xx_plane_format_modifiers; | 2129 | modifiers = i9xx_plane_format_modifiers; |
1820 | 2130 | ||
1821 | plane_funcs = &snb_sprite_funcs; | 2131 | plane_funcs = &snb_sprite_funcs; |
1822 | } else { | 2132 | } else { |
1823 | intel_plane->max_stride = g4x_sprite_max_stride; | 2133 | plane->max_stride = g4x_sprite_max_stride; |
1824 | intel_plane->update_plane = g4x_update_plane; | 2134 | plane->update_plane = g4x_update_plane; |
1825 | intel_plane->disable_plane = g4x_disable_plane; | 2135 | plane->disable_plane = g4x_disable_plane; |
1826 | intel_plane->get_hw_state = g4x_plane_get_hw_state; | 2136 | plane->get_hw_state = g4x_plane_get_hw_state; |
1827 | intel_plane->check_plane = g4x_sprite_check; | 2137 | plane->check_plane = g4x_sprite_check; |
1828 | 2138 | ||
1829 | modifiers = i9xx_plane_format_modifiers; | 2139 | modifiers = i9xx_plane_format_modifiers; |
1830 | if (IS_GEN6(dev_priv)) { | 2140 | if (IS_GEN6(dev_priv)) { |
1831 | plane_formats = snb_plane_formats; | 2141 | formats = snb_plane_formats; |
1832 | num_plane_formats = ARRAY_SIZE(snb_plane_formats); | 2142 | num_formats = ARRAY_SIZE(snb_plane_formats); |
1833 | 2143 | ||
1834 | plane_funcs = &snb_sprite_funcs; | 2144 | plane_funcs = &snb_sprite_funcs; |
1835 | } else { | 2145 | } else { |
1836 | plane_formats = g4x_plane_formats; | 2146 | formats = g4x_plane_formats; |
1837 | num_plane_formats = ARRAY_SIZE(g4x_plane_formats); | 2147 | num_formats = ARRAY_SIZE(g4x_plane_formats); |
1838 | 2148 | ||
1839 | plane_funcs = &g4x_sprite_funcs; | 2149 | plane_funcs = &g4x_sprite_funcs; |
1840 | } | 2150 | } |
1841 | } | 2151 | } |
1842 | 2152 | ||
1843 | if (INTEL_GEN(dev_priv) >= 9) { | 2153 | if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { |
1844 | supported_rotations = | ||
1845 | DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | | ||
1846 | DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; | ||
1847 | } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { | ||
1848 | supported_rotations = | 2154 | supported_rotations = |
1849 | DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | | 2155 | DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | |
1850 | DRM_MODE_REFLECT_X; | 2156 | DRM_MODE_REFLECT_X; |
@@ -1853,35 +2159,25 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, | |||
1853 | DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; | 2159 | DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; |
1854 | } | 2160 | } |
1855 | 2161 | ||
1856 | intel_plane->pipe = pipe; | 2162 | plane->pipe = pipe; |
1857 | intel_plane->i9xx_plane = plane; | 2163 | plane->id = PLANE_SPRITE0 + sprite; |
1858 | intel_plane->id = PLANE_SPRITE0 + plane; | 2164 | plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); |
1859 | intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, intel_plane->id); | ||
1860 | 2165 | ||
1861 | possible_crtcs = (1 << pipe); | 2166 | possible_crtcs = BIT(pipe); |
1862 | 2167 | ||
1863 | if (INTEL_GEN(dev_priv) >= 9) | 2168 | ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, |
1864 | ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base, | 2169 | possible_crtcs, plane_funcs, |
1865 | possible_crtcs, plane_funcs, | 2170 | formats, num_formats, modifiers, |
1866 | plane_formats, num_plane_formats, | 2171 | DRM_PLANE_TYPE_OVERLAY, |
1867 | modifiers, | 2172 | "sprite %c", sprite_name(pipe, sprite)); |
1868 | DRM_PLANE_TYPE_OVERLAY, | ||
1869 | "plane %d%c", plane + 2, pipe_name(pipe)); | ||
1870 | else | ||
1871 | ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base, | ||
1872 | possible_crtcs, plane_funcs, | ||
1873 | plane_formats, num_plane_formats, | ||
1874 | modifiers, | ||
1875 | DRM_PLANE_TYPE_OVERLAY, | ||
1876 | "sprite %c", sprite_name(pipe, plane)); | ||
1877 | if (ret) | 2173 | if (ret) |
1878 | goto fail; | 2174 | goto fail; |
1879 | 2175 | ||
1880 | drm_plane_create_rotation_property(&intel_plane->base, | 2176 | drm_plane_create_rotation_property(&plane->base, |
1881 | DRM_MODE_ROTATE_0, | 2177 | DRM_MODE_ROTATE_0, |
1882 | supported_rotations); | 2178 | supported_rotations); |
1883 | 2179 | ||
1884 | drm_plane_create_color_properties(&intel_plane->base, | 2180 | drm_plane_create_color_properties(&plane->base, |
1885 | BIT(DRM_COLOR_YCBCR_BT601) | | 2181 | BIT(DRM_COLOR_YCBCR_BT601) | |
1886 | BIT(DRM_COLOR_YCBCR_BT709), | 2182 | BIT(DRM_COLOR_YCBCR_BT709), |
1887 | BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | | 2183 | BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | |
@@ -1889,13 +2185,12 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, | |||
1889 | DRM_COLOR_YCBCR_BT709, | 2185 | DRM_COLOR_YCBCR_BT709, |
1890 | DRM_COLOR_YCBCR_LIMITED_RANGE); | 2186 | DRM_COLOR_YCBCR_LIMITED_RANGE); |
1891 | 2187 | ||
1892 | drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs); | 2188 | drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); |
1893 | 2189 | ||
1894 | return intel_plane; | 2190 | return plane; |
1895 | 2191 | ||
1896 | fail: | 2192 | fail: |
1897 | kfree(state); | 2193 | intel_plane_free(plane); |
1898 | kfree(intel_plane); | ||
1899 | 2194 | ||
1900 | return ERR_PTR(ret); | 2195 | return ERR_PTR(ret); |
1901 | } | 2196 | } |
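Both creation paths now allocate through intel_plane_alloc() and unwind through intel_plane_free() instead of the removed kzalloc()/intel_create_plane_state() pairing. Those helpers live outside this diff; a sketch of what they presumably bundle, inferred from the duties the deleted code performed (the scaler_id = -1 default, initial-state hookup, and symmetric teardown):

struct intel_plane *intel_plane_alloc(void)
{
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
	if (!plane_state) {
		kfree(plane);
		return ERR_PTR(-ENOMEM);
	}

	/* hook up the initial state as intel_create_plane_state() used to */
	__drm_atomic_helper_plane_reset(&plane->base, &plane_state->base);
	plane_state->scaler_id = -1;

	return plane;
}

void intel_plane_free(struct intel_plane *plane)
{
	intel_plane_destroy_state(&plane->base, plane->base.state);
	kfree(plane);
}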
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index b5b04cb892e9..860f306a23ba 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -885,6 +885,7 @@ intel_tv_compute_config(struct intel_encoder *encoder, | |||
885 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) | 885 | if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) |
886 | return false; | 886 | return false; |
887 | 887 | ||
888 | pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; | ||
888 | adjusted_mode->crtc_clock = tv_mode->clock; | 889 | adjusted_mode->crtc_clock = tv_mode->clock; |
889 | DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); | 890 | DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); |
890 | pipe_config->pipe_bpp = 8*3; | 891 | pipe_config->pipe_bpp = 8*3; |
@@ -1377,17 +1378,10 @@ intel_tv_get_modes(struct drm_connector *connector) | |||
1377 | return count; | 1378 | return count; |
1378 | } | 1379 | } |
1379 | 1380 | ||
1380 | static void | ||
1381 | intel_tv_destroy(struct drm_connector *connector) | ||
1382 | { | ||
1383 | drm_connector_cleanup(connector); | ||
1384 | kfree(connector); | ||
1385 | } | ||
1386 | |||
1387 | static const struct drm_connector_funcs intel_tv_connector_funcs = { | 1381 | static const struct drm_connector_funcs intel_tv_connector_funcs = { |
1388 | .late_register = intel_connector_register, | 1382 | .late_register = intel_connector_register, |
1389 | .early_unregister = intel_connector_unregister, | 1383 | .early_unregister = intel_connector_unregister, |
1390 | .destroy = intel_tv_destroy, | 1384 | .destroy = intel_connector_destroy, |
1391 | .fill_modes = drm_helper_probe_single_connector_modes, | 1385 | .fill_modes = drm_helper_probe_single_connector_modes, |
1392 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | 1386 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, |
1393 | .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, | 1387 | .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, |
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index b1b3e81b6e24..b34c318b238d 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c | |||
@@ -376,7 +376,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915) | |||
376 | 376 | ||
377 | intel_guc_init_params(guc); | 377 | intel_guc_init_params(guc); |
378 | ret = intel_guc_fw_upload(guc); | 378 | ret = intel_guc_fw_upload(guc); |
379 | if (ret == 0 || ret != -EAGAIN) | 379 | if (ret == 0 || ret != -ETIMEDOUT) |
380 | break; | 380 | break; |
381 | 381 | ||
382 | DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and " | 382 | DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and " |
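With the switch from -EAGAIN to -ETIMEDOUT, only a firmware DMA timeout takes the reset-and-retry path; success or any other error exits immediately (the ret == 0 clause is redundant with ret != -ETIMEDOUT and kept for readability). The surrounding loop, approximately — the retry budget is an assumption:

int attempts = 3;	/* assumed per-platform retry budget */

while (attempts--) {
	intel_guc_init_params(guc);
	ret = intel_guc_fw_upload(guc);
	if (ret == 0 || ret != -ETIMEDOUT)
		break;	/* done, or an error a retry cannot fix */

	DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
			 "retry %d more time(s)\n", ret, attempts);
}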
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h index 87910aa83267..0e3bd580e267 100644 --- a/drivers/gpu/drm/i915/intel_uc_fw.h +++ b/drivers/gpu/drm/i915/intel_uc_fw.h | |||
@@ -115,9 +115,14 @@ static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw) | |||
115 | return uc_fw->path != NULL; | 115 | return uc_fw->path != NULL; |
116 | } | 116 | } |
117 | 117 | ||
118 | static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw) | ||
119 | { | ||
120 | return uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS; | ||
121 | } | ||
122 | |||
118 | static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw) | 123 | static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw) |
119 | { | 124 | { |
120 | if (uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS) | 125 | if (intel_uc_fw_is_loaded(uc_fw)) |
121 | uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING; | 126 | uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING; |
122 | } | 127 | } |
123 | 128 | ||
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 3ad302c66254..9289515108c3 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
@@ -1437,7 +1437,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) | |||
1437 | FORCEWAKE_MEDIA_VEBOX_GEN11(i), | 1437 | FORCEWAKE_MEDIA_VEBOX_GEN11(i), |
1438 | FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i)); | 1438 | FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i)); |
1439 | } | 1439 | } |
1440 | } else if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) { | 1440 | } else if (IS_GEN10(dev_priv) || IS_GEN9(dev_priv)) { |
1441 | dev_priv->uncore.funcs.force_wake_get = | 1441 | dev_priv->uncore.funcs.force_wake_get = |
1442 | fw_domains_get_with_fallback; | 1442 | fw_domains_get_with_fallback; |
1443 | dev_priv->uncore.funcs.force_wake_put = fw_domains_put; | 1443 | dev_priv->uncore.funcs.force_wake_put = fw_domains_put; |
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index bba98cf83cbd..bf3662ad5fed 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h | |||
@@ -326,6 +326,13 @@ enum vbt_gmbus_ddi { | |||
326 | ICL_DDC_BUS_PORT_4, | 326 | ICL_DDC_BUS_PORT_4, |
327 | }; | 327 | }; |
328 | 328 | ||
329 | #define DP_AUX_A 0x40 | ||
330 | #define DP_AUX_B 0x10 | ||
331 | #define DP_AUX_C 0x20 | ||
332 | #define DP_AUX_D 0x30 | ||
333 | #define DP_AUX_E 0x50 | ||
334 | #define DP_AUX_F 0x60 | ||
335 | |||
329 | #define VBT_DP_MAX_LINK_RATE_HBR3 0 | 336 | #define VBT_DP_MAX_LINK_RATE_HBR3 0 |
330 | #define VBT_DP_MAX_LINK_RATE_HBR2 1 | 337 | #define VBT_DP_MAX_LINK_RATE_HBR2 1 |
331 | #define VBT_DP_MAX_LINK_RATE_HBR 2 | 338 | #define VBT_DP_MAX_LINK_RATE_HBR 2 |
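The DP_AUX_* values added above encode the VBT child device's aux_channel field. A hedged sketch of how a consumer maps them to AUX channels (assumed shape; the real mapping lives in intel_bios.c):

    switch (info->alternate_aux_channel) {
    case DP_AUX_A: aux_ch = AUX_CH_A; break;
    case DP_AUX_B: aux_ch = AUX_CH_B; break;
    case DP_AUX_C: aux_ch = AUX_CH_C; break;
    case DP_AUX_D: aux_ch = AUX_CH_D; break;
    case DP_AUX_E: aux_ch = AUX_CH_E; break;
    case DP_AUX_F: aux_ch = AUX_CH_F; break;
    default:
            MISSING_CASE(info->alternate_aux_channel);
            aux_ch = AUX_CH_A;
            break;
    }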
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c index 4bcdeaf8d98f..ca1f78a42b17 100644 --- a/drivers/gpu/drm/i915/intel_workarounds.c +++ b/drivers/gpu/drm/i915/intel_workarounds.c | |||
@@ -823,18 +823,21 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv) | |||
823 | _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE)); | 823 | _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE)); |
824 | 824 | ||
825 | /* WaInPlaceDecompressionHang:icl */ | 825 | /* WaInPlaceDecompressionHang:icl */ |
826 | I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | | 826 | I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, |
827 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); | 827 | I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | |
828 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); | ||
828 | 829 | ||
829 | /* WaPipelineFlushCoherentLines:icl */ | 830 | /* WaPipelineFlushCoherentLines:icl */ |
830 | I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | | 831 | I915_WRITE(GEN8_L3SQCREG4, |
831 | GEN8_LQSC_FLUSH_COHERENT_LINES); | 832 | I915_READ(GEN8_L3SQCREG4) | |
833 | GEN8_LQSC_FLUSH_COHERENT_LINES); | ||
832 | 834 | ||
833 | /* Wa_1405543622:icl | 835 | /* Wa_1405543622:icl |
834 | * Formerly known as WaGAPZPriorityScheme | 836 | * Formerly known as WaGAPZPriorityScheme |
835 | */ | 837 | */ |
836 | I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) | | 838 | I915_WRITE(GEN8_GARBCNTL, |
837 | GEN11_ARBITRATION_PRIO_ORDER_MASK); | 839 | I915_READ(GEN8_GARBCNTL) | |
840 | GEN11_ARBITRATION_PRIO_ORDER_MASK); | ||
838 | 841 | ||
839 | /* Wa_1604223664:icl | 842 | /* Wa_1604223664:icl |
840 | * Formerly known as WaL3BankAddressHashing | 843 | * Formerly known as WaL3BankAddressHashing |
@@ -854,21 +857,24 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv) | |||
854 | /* Wa_1405733216:icl | 857 | /* Wa_1405733216:icl |
855 | * Formerly known as WaDisableCleanEvicts | 858 | * Formerly known as WaDisableCleanEvicts |
856 | */ | 859 | */ |
857 | I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | | 860 | I915_WRITE(GEN8_L3SQCREG4, |
858 | GEN11_LQSC_CLEAN_EVICT_DISABLE); | 861 | I915_READ(GEN8_L3SQCREG4) | |
862 | GEN11_LQSC_CLEAN_EVICT_DISABLE); | ||
859 | 863 | ||
860 | /* Wa_1405766107:icl | 864 | /* Wa_1405766107:icl |
861 | * Formerly known as WaCL2SFHalfMaxAlloc | 865 | * Formerly known as WaCL2SFHalfMaxAlloc |
862 | */ | 866 | */ |
863 | I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) | | 867 | I915_WRITE(GEN11_LSN_UNSLCVC, |
864 | GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC | | 868 | I915_READ(GEN11_LSN_UNSLCVC) | |
865 | GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC); | 869 | GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC | |
870 | GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC); | ||
866 | 871 | ||
867 | /* Wa_220166154:icl | 872 | /* Wa_220166154:icl |
868 | * Formerly known as WaDisCtxReload | 873 | * Formerly known as WaDisCtxReload |
869 | */ | 874 | */ |
870 | I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) | | 875 | I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA, |
871 | GAMW_ECO_DEV_CTX_RELOAD_DISABLE); | 876 | I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) | |
877 | GAMW_ECO_DEV_CTX_RELOAD_DISABLE); | ||
872 | 878 | ||
873 | /* Wa_1405779004:icl (pre-prod) */ | 879 | /* Wa_1405779004:icl (pre-prod) */ |
874 | if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0)) | 880 | if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0)) |
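Every register update reflowed above keeps the same read-modify-write shape: read the register, OR in the workaround bits, write the result back. A hypothetical helper capturing the pattern (i915_rmw_set() is an invented name, not part of this patch):

    static void i915_rmw_set(struct drm_i915_private *dev_priv,
                             i915_reg_t reg, u32 set)
    {
            /* read-modify-write: preserve existing bits, OR in new ones */
            I915_WRITE(reg, I915_READ(reg) | set);
    }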
@@ -905,6 +911,13 @@ static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv) | |||
905 | I915_WRITE(GAMT_CHKN_BIT_REG, | 911 | I915_WRITE(GAMT_CHKN_BIT_REG, |
906 | I915_READ(GAMT_CHKN_BIT_REG) | | 912 | I915_READ(GAMT_CHKN_BIT_REG) | |
907 | GAMT_CHKN_DISABLE_L3_COH_PIPE); | 913 | GAMT_CHKN_DISABLE_L3_COH_PIPE); |
914 | |||
915 | /* Wa_1406609255:icl (pre-prod) */ | ||
916 | if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0)) | ||
917 | I915_WRITE(GEN7_SARCHKMD, | ||
918 | I915_READ(GEN7_SARCHKMD) | | ||
919 | GEN7_DISABLE_DEMAND_PREFETCH | | ||
920 | GEN7_DISABLE_SAMPLER_PREFETCH); | ||
908 | } | 921 | } |
909 | 922 | ||
910 | void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv) | 923 | void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv) |
@@ -941,7 +954,7 @@ struct whitelist { | |||
941 | 954 | ||
942 | static void whitelist_reg(struct whitelist *w, i915_reg_t reg) | 955 | static void whitelist_reg(struct whitelist *w, i915_reg_t reg) |
943 | { | 956 | { |
944 | if (GEM_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS)) | 957 | if (GEM_DEBUG_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS)) |
945 | return; | 958 | return; |
946 | 959 | ||
947 | w->reg[w->count++] = reg; | 960 | w->reg[w->count++] = reg; |
@@ -1009,6 +1022,11 @@ static void cnl_whitelist_build(struct whitelist *w) | |||
1009 | 1022 | ||
1010 | static void icl_whitelist_build(struct whitelist *w) | 1023 | static void icl_whitelist_build(struct whitelist *w) |
1011 | { | 1024 | { |
1025 | /* WaAllowUMDToModifyHalfSliceChicken7:icl */ | ||
1026 | whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7); | ||
1027 | |||
1028 | /* WaAllowUMDToModifySamplerMode:icl */ | ||
1029 | whitelist_reg(w, GEN10_SAMPLER_MODE); | ||
1012 | } | 1030 | } |
1013 | 1031 | ||
1014 | static struct whitelist *whitelist_build(struct intel_engine_cs *engine, | 1032 | static struct whitelist *whitelist_build(struct intel_engine_cs *engine, |
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index 8d03f64eabd7..26c065c8d2c0 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c | |||
@@ -551,7 +551,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg) | |||
551 | err = igt_check_page_sizes(vma); | 551 | err = igt_check_page_sizes(vma); |
552 | 552 | ||
553 | if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) { | 553 | if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) { |
554 | pr_err("page_sizes.gtt=%u, expected %lu\n", | 554 | pr_err("page_sizes.gtt=%u, expected %llu\n", |
555 | vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K); | 555 | vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K); |
556 | err = -EINVAL; | 556 | err = -EINVAL; |
557 | } | 557 | } |
@@ -1135,7 +1135,8 @@ static int igt_write_huge(struct i915_gem_context *ctx, | |||
1135 | n = 0; | 1135 | n = 0; |
1136 | for_each_engine(engine, i915, id) { | 1136 | for_each_engine(engine, i915, id) { |
1137 | if (!intel_engine_can_store_dword(engine)) { | 1137 | if (!intel_engine_can_store_dword(engine)) { |
1138 | pr_info("store-dword-imm not supported on engine=%u\n", id); | 1138 | pr_info("store-dword-imm not supported on engine=%u\n", |
1139 | id); | ||
1139 | continue; | 1140 | continue; |
1140 | } | 1141 | } |
1141 | engines[n++] = engine; | 1142 | engines[n++] = engine; |
@@ -1167,17 +1168,30 @@ static int igt_write_huge(struct i915_gem_context *ctx, | |||
1167 | engine = engines[order[i] % n]; | 1168 | engine = engines[order[i] % n]; |
1168 | i = (i + 1) % (n * I915_NUM_ENGINES); | 1169 | i = (i + 1) % (n * I915_NUM_ENGINES); |
1169 | 1170 | ||
1170 | err = __igt_write_huge(ctx, engine, obj, size, offset_low, dword, num + 1); | 1171 | /* |
1172 | * In order to utilize 64K pages we need to both pad the vma | ||
1173 | * size and ensure the vma offset is at the start of the pt | ||
1174 | * boundary; however, to improve coverage we opt for testing both ||
1175 | * aligned and unaligned offsets. | ||
1176 | */ | ||
1177 | if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K) | ||
1178 | offset_low = round_down(offset_low, | ||
1179 | I915_GTT_PAGE_SIZE_2M); | ||
1180 | |||
1181 | err = __igt_write_huge(ctx, engine, obj, size, offset_low, | ||
1182 | dword, num + 1); | ||
1171 | if (err) | 1183 | if (err) |
1172 | break; | 1184 | break; |
1173 | 1185 | ||
1174 | err = __igt_write_huge(ctx, engine, obj, size, offset_high, dword, num + 1); | 1186 | err = __igt_write_huge(ctx, engine, obj, size, offset_high, |
1187 | dword, num + 1); | ||
1175 | if (err) | 1188 | if (err) |
1176 | break; | 1189 | break; |
1177 | 1190 | ||
1178 | if (igt_timeout(end_time, | 1191 | if (igt_timeout(end_time, |
1179 | "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n", | 1192 | "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n", |
1180 | __func__, engine->id, offset_low, offset_high, max_page_size)) | 1193 | __func__, engine->id, offset_low, offset_high, |
1194 | max_page_size)) | ||
1181 | break; | 1195 | break; |
1182 | } | 1196 | } |
1183 | 1197 | ||
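The rounding inserted above snaps offset_low back onto a 2M page-table boundary whenever the object carries 64K pages, since 64K GTT entries can only be used when the VMA starts at a page-table boundary. A worked example with a hypothetical offset:

    /* Hypothetical values for the alignment step above:
     *   offset_low            = 0x0074_5000
     *   I915_GTT_PAGE_SIZE_2M = 0x0020_0000
     *   round_down(0x745000, 0x200000) == 0x600000
     * so the low offset lands on the start of its 2M PT boundary and
     * the GTT can actually install 64K page entries for it.
     */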
@@ -1436,7 +1450,7 @@ static int igt_ppgtt_pin_update(void *arg) | |||
1436 | * huge-gtt-pages. | 1450 | * huge-gtt-pages. |
1437 | */ | 1451 | */ |
1438 | 1452 | ||
1439 | if (!USES_FULL_48BIT_PPGTT(dev_priv)) { | 1453 | if (!HAS_FULL_48BIT_PPGTT(dev_priv)) { |
1440 | pr_info("48b PPGTT not supported, skipping\n"); | 1454 | pr_info("48b PPGTT not supported, skipping\n"); |
1441 | return 0; | 1455 | return 0; |
1442 | } | 1456 | } |
@@ -1687,10 +1701,9 @@ int i915_gem_huge_page_mock_selftests(void) | |||
1687 | SUBTEST(igt_mock_ppgtt_huge_fill), | 1701 | SUBTEST(igt_mock_ppgtt_huge_fill), |
1688 | SUBTEST(igt_mock_ppgtt_64K), | 1702 | SUBTEST(igt_mock_ppgtt_64K), |
1689 | }; | 1703 | }; |
1690 | int saved_ppgtt = i915_modparams.enable_ppgtt; | ||
1691 | struct drm_i915_private *dev_priv; | 1704 | struct drm_i915_private *dev_priv; |
1692 | struct pci_dev *pdev; | ||
1693 | struct i915_hw_ppgtt *ppgtt; | 1705 | struct i915_hw_ppgtt *ppgtt; |
1706 | struct pci_dev *pdev; | ||
1694 | int err; | 1707 | int err; |
1695 | 1708 | ||
1696 | dev_priv = mock_gem_device(); | 1709 | dev_priv = mock_gem_device(); |
@@ -1698,7 +1711,7 @@ int i915_gem_huge_page_mock_selftests(void) | |||
1698 | return -ENOMEM; | 1711 | return -ENOMEM; |
1699 | 1712 | ||
1700 | /* Pretend to be a device which supports the 48b PPGTT */ | 1713 | /* Pretend to be a device which supports the 48b PPGTT */ |
1701 | i915_modparams.enable_ppgtt = 3; | 1714 | mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL; |
1702 | 1715 | ||
1703 | pdev = dev_priv->drm.pdev; | 1716 | pdev = dev_priv->drm.pdev; |
1704 | dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39)); | 1717 | dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39)); |
@@ -1731,9 +1744,6 @@ out_close: | |||
1731 | 1744 | ||
1732 | out_unlock: | 1745 | out_unlock: |
1733 | mutex_unlock(&dev_priv->drm.struct_mutex); | 1746 | mutex_unlock(&dev_priv->drm.struct_mutex); |
1734 | |||
1735 | i915_modparams.enable_ppgtt = saved_ppgtt; | ||
1736 | |||
1737 | drm_dev_put(&dev_priv->drm); | 1747 | drm_dev_put(&dev_priv->drm); |
1738 | 1748 | ||
1739 | return err; | 1749 | return err; |
@@ -1753,7 +1763,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) | |||
1753 | struct i915_gem_context *ctx; | 1763 | struct i915_gem_context *ctx; |
1754 | int err; | 1764 | int err; |
1755 | 1765 | ||
1756 | if (!USES_PPGTT(dev_priv)) { | 1766 | if (!HAS_PPGTT(dev_priv)) { |
1757 | pr_info("PPGTT not supported, skipping live-selftests\n"); | 1767 | pr_info("PPGTT not supported, skipping live-selftests\n"); |
1758 | return 0; | 1768 | return 0; |
1759 | } | 1769 | } |
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index 76df25aa90c9..7d82043aff10 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c | |||
@@ -39,7 +39,8 @@ struct live_test { | |||
39 | const char *func; | 39 | const char *func; |
40 | const char *name; | 40 | const char *name; |
41 | 41 | ||
42 | unsigned int reset_count; | 42 | unsigned int reset_global; |
43 | unsigned int reset_engine[I915_NUM_ENGINES]; | ||
43 | }; | 44 | }; |
44 | 45 | ||
45 | static int begin_live_test(struct live_test *t, | 46 | static int begin_live_test(struct live_test *t, |
@@ -47,6 +48,8 @@ static int begin_live_test(struct live_test *t, | |||
47 | const char *func, | 48 | const char *func, |
48 | const char *name) | 49 | const char *name) |
49 | { | 50 | { |
51 | struct intel_engine_cs *engine; | ||
52 | enum intel_engine_id id; | ||
50 | int err; | 53 | int err; |
51 | 54 | ||
52 | t->i915 = i915; | 55 | t->i915 = i915; |
@@ -63,7 +66,11 @@ static int begin_live_test(struct live_test *t, | |||
63 | } | 66 | } |
64 | 67 | ||
65 | i915->gpu_error.missed_irq_rings = 0; | 68 | i915->gpu_error.missed_irq_rings = 0; |
66 | t->reset_count = i915_reset_count(&i915->gpu_error); | 69 | t->reset_global = i915_reset_count(&i915->gpu_error); |
70 | |||
71 | for_each_engine(engine, i915, id) | ||
72 | t->reset_engine[id] = | ||
73 | i915_reset_engine_count(&i915->gpu_error, engine); | ||
67 | 74 | ||
68 | return 0; | 75 | return 0; |
69 | } | 76 | } |
@@ -71,14 +78,28 @@ static int begin_live_test(struct live_test *t, | |||
71 | static int end_live_test(struct live_test *t) | 78 | static int end_live_test(struct live_test *t) |
72 | { | 79 | { |
73 | struct drm_i915_private *i915 = t->i915; | 80 | struct drm_i915_private *i915 = t->i915; |
81 | struct intel_engine_cs *engine; | ||
82 | enum intel_engine_id id; | ||
74 | 83 | ||
75 | if (igt_flush_test(i915, I915_WAIT_LOCKED)) | 84 | if (igt_flush_test(i915, I915_WAIT_LOCKED)) |
76 | return -EIO; | 85 | return -EIO; |
77 | 86 | ||
78 | if (t->reset_count != i915_reset_count(&i915->gpu_error)) { | 87 | if (t->reset_global != i915_reset_count(&i915->gpu_error)) { |
79 | pr_err("%s(%s): GPU was reset %d times!\n", | 88 | pr_err("%s(%s): GPU was reset %d times!\n", |
80 | t->func, t->name, | 89 | t->func, t->name, |
81 | i915_reset_count(&i915->gpu_error) - t->reset_count); | 90 | i915_reset_count(&i915->gpu_error) - t->reset_global); |
91 | return -EIO; | ||
92 | } | ||
93 | |||
94 | for_each_engine(engine, i915, id) { | ||
95 | if (t->reset_engine[id] == | ||
96 | i915_reset_engine_count(&i915->gpu_error, engine)) | ||
97 | continue; | ||
98 | |||
99 | pr_err("%s(%s): engine '%s' was reset %d times!\n", | ||
100 | t->func, t->name, engine->name, | ||
101 | i915_reset_engine_count(&i915->gpu_error, engine) - | ||
102 | t->reset_engine[id]); | ||
82 | return -EIO; | 103 | return -EIO; |
83 | } | 104 | } |
84 | 105 | ||
@@ -531,11 +552,11 @@ static int igt_ctx_exec(void *arg) | |||
531 | { | 552 | { |
532 | struct drm_i915_private *i915 = arg; | 553 | struct drm_i915_private *i915 = arg; |
533 | struct drm_i915_gem_object *obj = NULL; | 554 | struct drm_i915_gem_object *obj = NULL; |
555 | unsigned long ncontexts, ndwords, dw; | ||
534 | struct drm_file *file; | 556 | struct drm_file *file; |
535 | IGT_TIMEOUT(end_time); | 557 | IGT_TIMEOUT(end_time); |
536 | LIST_HEAD(objects); | 558 | LIST_HEAD(objects); |
537 | unsigned long ncontexts, ndwords, dw; | 559 | struct live_test t; |
538 | bool first_shared_gtt = true; | ||
539 | int err = -ENODEV; | 560 | int err = -ENODEV; |
540 | 561 | ||
541 | /* | 562 | /* |
@@ -553,6 +574,10 @@ static int igt_ctx_exec(void *arg) | |||
553 | 574 | ||
554 | mutex_lock(&i915->drm.struct_mutex); | 575 | mutex_lock(&i915->drm.struct_mutex); |
555 | 576 | ||
577 | err = begin_live_test(&t, i915, __func__, ""); | ||
578 | if (err) | ||
579 | goto out_unlock; | ||
580 | |||
556 | ncontexts = 0; | 581 | ncontexts = 0; |
557 | ndwords = 0; | 582 | ndwords = 0; |
558 | dw = 0; | 583 | dw = 0; |
@@ -561,12 +586,7 @@ static int igt_ctx_exec(void *arg) | |||
561 | struct i915_gem_context *ctx; | 586 | struct i915_gem_context *ctx; |
562 | unsigned int id; | 587 | unsigned int id; |
563 | 588 | ||
564 | if (first_shared_gtt) { | 589 | ctx = i915_gem_create_context(i915, file->driver_priv); |
565 | ctx = __create_hw_context(i915, file->driver_priv); | ||
566 | first_shared_gtt = false; | ||
567 | } else { | ||
568 | ctx = i915_gem_create_context(i915, file->driver_priv); | ||
569 | } | ||
570 | if (IS_ERR(ctx)) { | 590 | if (IS_ERR(ctx)) { |
571 | err = PTR_ERR(ctx); | 591 | err = PTR_ERR(ctx); |
572 | goto out_unlock; | 592 | goto out_unlock; |
@@ -622,7 +642,7 @@ static int igt_ctx_exec(void *arg) | |||
622 | } | 642 | } |
623 | 643 | ||
624 | out_unlock: | 644 | out_unlock: |
625 | if (igt_flush_test(i915, I915_WAIT_LOCKED)) | 645 | if (end_live_test(&t)) |
626 | err = -EIO; | 646 | err = -EIO; |
627 | mutex_unlock(&i915->drm.struct_mutex); | 647 | mutex_unlock(&i915->drm.struct_mutex); |
628 | 648 | ||
@@ -634,13 +654,14 @@ static int igt_ctx_readonly(void *arg) | |||
634 | { | 654 | { |
635 | struct drm_i915_private *i915 = arg; | 655 | struct drm_i915_private *i915 = arg; |
636 | struct drm_i915_gem_object *obj = NULL; | 656 | struct drm_i915_gem_object *obj = NULL; |
657 | struct i915_gem_context *ctx; | ||
658 | struct i915_hw_ppgtt *ppgtt; | ||
659 | unsigned long ndwords, dw; | ||
637 | struct drm_file *file; | 660 | struct drm_file *file; |
638 | I915_RND_STATE(prng); | 661 | I915_RND_STATE(prng); |
639 | IGT_TIMEOUT(end_time); | 662 | IGT_TIMEOUT(end_time); |
640 | LIST_HEAD(objects); | 663 | LIST_HEAD(objects); |
641 | struct i915_gem_context *ctx; | 664 | struct live_test t; |
642 | struct i915_hw_ppgtt *ppgtt; | ||
643 | unsigned long ndwords, dw; | ||
644 | int err = -ENODEV; | 665 | int err = -ENODEV; |
645 | 666 | ||
646 | /* | 667 | /* |
@@ -655,6 +676,10 @@ static int igt_ctx_readonly(void *arg) | |||
655 | 676 | ||
656 | mutex_lock(&i915->drm.struct_mutex); | 677 | mutex_lock(&i915->drm.struct_mutex); |
657 | 678 | ||
679 | err = begin_live_test(&t, i915, __func__, ""); | ||
680 | if (err) | ||
681 | goto out_unlock; | ||
682 | |||
658 | ctx = i915_gem_create_context(i915, file->driver_priv); | 683 | ctx = i915_gem_create_context(i915, file->driver_priv); |
659 | if (IS_ERR(ctx)) { | 684 | if (IS_ERR(ctx)) { |
660 | err = PTR_ERR(ctx); | 685 | err = PTR_ERR(ctx); |
@@ -727,7 +752,324 @@ static int igt_ctx_readonly(void *arg) | |||
727 | } | 752 | } |
728 | 753 | ||
729 | out_unlock: | 754 | out_unlock: |
730 | if (igt_flush_test(i915, I915_WAIT_LOCKED)) | 755 | if (end_live_test(&t)) |
756 | err = -EIO; | ||
757 | mutex_unlock(&i915->drm.struct_mutex); | ||
758 | |||
759 | mock_file_free(i915, file); | ||
760 | return err; | ||
761 | } | ||
762 | |||
763 | static int check_scratch(struct i915_gem_context *ctx, u64 offset) | ||
764 | { | ||
765 | struct drm_mm_node *node = | ||
766 | __drm_mm_interval_first(&ctx->ppgtt->vm.mm, | ||
767 | offset, offset + sizeof(u32) - 1); | ||
768 | if (!node || node->start > offset) | ||
769 | return 0; | ||
770 | |||
771 | GEM_BUG_ON(offset >= node->start + node->size); | ||
772 | |||
773 | pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n", | ||
774 | upper_32_bits(offset), lower_32_bits(offset)); | ||
775 | return -EINVAL; | ||
776 | } | ||
777 | |||
778 | static int write_to_scratch(struct i915_gem_context *ctx, | ||
779 | struct intel_engine_cs *engine, | ||
780 | u64 offset, u32 value) | ||
781 | { | ||
782 | struct drm_i915_private *i915 = ctx->i915; | ||
783 | struct drm_i915_gem_object *obj; | ||
784 | struct i915_request *rq; | ||
785 | struct i915_vma *vma; | ||
786 | u32 *cmd; | ||
787 | int err; | ||
788 | |||
789 | GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE); | ||
790 | |||
791 | obj = i915_gem_object_create_internal(i915, PAGE_SIZE); | ||
792 | if (IS_ERR(obj)) | ||
793 | return PTR_ERR(obj); | ||
794 | |||
795 | cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); | ||
796 | if (IS_ERR(cmd)) { | ||
797 | err = PTR_ERR(cmd); | ||
798 | goto err; | ||
799 | } | ||
800 | |||
801 | *cmd++ = MI_STORE_DWORD_IMM_GEN4; | ||
802 | if (INTEL_GEN(i915) >= 8) { | ||
803 | *cmd++ = lower_32_bits(offset); | ||
804 | *cmd++ = upper_32_bits(offset); | ||
805 | } else { | ||
806 | *cmd++ = 0; | ||
807 | *cmd++ = offset; | ||
808 | } | ||
809 | *cmd++ = value; | ||
810 | *cmd = MI_BATCH_BUFFER_END; | ||
811 | i915_gem_object_unpin_map(obj); | ||
812 | |||
813 | err = i915_gem_object_set_to_gtt_domain(obj, false); | ||
814 | if (err) | ||
815 | goto err; | ||
816 | |||
817 | vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); | ||
818 | if (IS_ERR(vma)) { | ||
819 | err = PTR_ERR(vma); | ||
820 | goto err; | ||
821 | } | ||
822 | |||
823 | err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED); | ||
824 | if (err) | ||
825 | goto err; | ||
826 | |||
827 | err = check_scratch(ctx, offset); | ||
828 | if (err) | ||
829 | goto err_unpin; | ||
830 | |||
831 | rq = i915_request_alloc(engine, ctx); | ||
832 | if (IS_ERR(rq)) { | ||
833 | err = PTR_ERR(rq); | ||
834 | goto err_unpin; | ||
835 | } | ||
836 | |||
837 | err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0); | ||
838 | if (err) | ||
839 | goto err_request; | ||
840 | |||
841 | err = i915_vma_move_to_active(vma, rq, 0); | ||
842 | if (err) | ||
843 | goto skip_request; | ||
844 | |||
845 | i915_gem_object_set_active_reference(obj); | ||
846 | i915_vma_unpin(vma); | ||
847 | i915_vma_close(vma); | ||
848 | |||
849 | i915_request_add(rq); | ||
850 | |||
851 | return 0; | ||
852 | |||
853 | skip_request: | ||
854 | i915_request_skip(rq, err); | ||
855 | err_request: | ||
856 | i915_request_add(rq); | ||
857 | err_unpin: | ||
858 | i915_vma_unpin(vma); | ||
859 | err: | ||
860 | i915_gem_object_put(obj); | ||
861 | return err; | ||
862 | } | ||
863 | |||
864 | static int read_from_scratch(struct i915_gem_context *ctx, | ||
865 | struct intel_engine_cs *engine, | ||
866 | u64 offset, u32 *value) | ||
867 | { | ||
868 | struct drm_i915_private *i915 = ctx->i915; | ||
869 | struct drm_i915_gem_object *obj; | ||
870 | const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */ | ||
871 | const u32 result = 0x100; | ||
872 | struct i915_request *rq; | ||
873 | struct i915_vma *vma; | ||
874 | u32 *cmd; | ||
875 | int err; | ||
876 | |||
877 | GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE); | ||
878 | |||
879 | obj = i915_gem_object_create_internal(i915, PAGE_SIZE); | ||
880 | if (IS_ERR(obj)) | ||
881 | return PTR_ERR(obj); | ||
882 | |||
883 | cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); | ||
884 | if (IS_ERR(cmd)) { | ||
885 | err = PTR_ERR(cmd); | ||
886 | goto err; | ||
887 | } | ||
888 | |||
889 | memset(cmd, POISON_INUSE, PAGE_SIZE); | ||
890 | if (INTEL_GEN(i915) >= 8) { | ||
891 | *cmd++ = MI_LOAD_REGISTER_MEM_GEN8; | ||
892 | *cmd++ = RCS_GPR0; | ||
893 | *cmd++ = lower_32_bits(offset); | ||
894 | *cmd++ = upper_32_bits(offset); | ||
895 | *cmd++ = MI_STORE_REGISTER_MEM_GEN8; | ||
896 | *cmd++ = RCS_GPR0; | ||
897 | *cmd++ = result; | ||
898 | *cmd++ = 0; | ||
899 | } else { | ||
900 | *cmd++ = MI_LOAD_REGISTER_MEM; | ||
901 | *cmd++ = RCS_GPR0; | ||
902 | *cmd++ = offset; | ||
903 | *cmd++ = MI_STORE_REGISTER_MEM; | ||
904 | *cmd++ = RCS_GPR0; | ||
905 | *cmd++ = result; | ||
906 | } | ||
907 | *cmd = MI_BATCH_BUFFER_END; | ||
908 | i915_gem_object_unpin_map(obj); | ||
909 | |||
910 | err = i915_gem_object_set_to_gtt_domain(obj, false); | ||
911 | if (err) | ||
912 | goto err; | ||
913 | |||
914 | vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); | ||
915 | if (IS_ERR(vma)) { | ||
916 | err = PTR_ERR(vma); | ||
917 | goto err; | ||
918 | } | ||
919 | |||
920 | err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED); | ||
921 | if (err) | ||
922 | goto err; | ||
923 | |||
924 | err = check_scratch(ctx, offset); | ||
925 | if (err) | ||
926 | goto err_unpin; | ||
927 | |||
928 | rq = i915_request_alloc(engine, ctx); | ||
929 | if (IS_ERR(rq)) { | ||
930 | err = PTR_ERR(rq); | ||
931 | goto err_unpin; | ||
932 | } | ||
933 | |||
934 | err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0); | ||
935 | if (err) | ||
936 | goto err_request; | ||
937 | |||
938 | err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); | ||
939 | if (err) | ||
940 | goto skip_request; | ||
941 | |||
942 | i915_vma_unpin(vma); | ||
943 | i915_vma_close(vma); | ||
944 | |||
945 | i915_request_add(rq); | ||
946 | |||
947 | err = i915_gem_object_set_to_cpu_domain(obj, false); | ||
948 | if (err) | ||
949 | goto err; | ||
950 | |||
951 | cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); | ||
952 | if (IS_ERR(cmd)) { | ||
953 | err = PTR_ERR(cmd); | ||
954 | goto err; | ||
955 | } | ||
956 | |||
957 | *value = cmd[result / sizeof(*cmd)]; | ||
958 | i915_gem_object_unpin_map(obj); | ||
959 | i915_gem_object_put(obj); | ||
960 | |||
961 | return 0; | ||
962 | |||
963 | skip_request: | ||
964 | i915_request_skip(rq, err); | ||
965 | err_request: | ||
966 | i915_request_add(rq); | ||
967 | err_unpin: | ||
968 | i915_vma_unpin(vma); | ||
969 | err: | ||
970 | i915_gem_object_put(obj); | ||
971 | return err; | ||
972 | } | ||
973 | |||
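write_to_scratch() and read_from_scratch() above hand-assemble one-page batch buffers. For gen8+, the dword streams emitted, as constructed above, are:

    /* write_to_scratch(), gen8+:
     *   MI_STORE_DWORD_IMM_GEN4, lower_32_bits(offset),
     *   upper_32_bits(offset), value, MI_BATCH_BUFFER_END
     *
     * read_from_scratch(), gen8+:
     *   MI_LOAD_REGISTER_MEM_GEN8,  RCS_GPR0, lower/upper offset,
     *   MI_STORE_REGISTER_MEM_GEN8, RCS_GPR0, result, 0,
     *   MI_BATCH_BUFFER_END
     * i.e. load GPR0 from the scratch address, then store GPR0 at byte
     * offset 'result' (0x100) in the batch page, which the CPU reads
     * back as cmd[result / sizeof(*cmd)].
     */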
974 | static int igt_vm_isolation(void *arg) | ||
975 | { | ||
976 | struct drm_i915_private *i915 = arg; | ||
977 | struct i915_gem_context *ctx_a, *ctx_b; | ||
978 | struct intel_engine_cs *engine; | ||
979 | struct drm_file *file; | ||
980 | I915_RND_STATE(prng); | ||
981 | unsigned long count; | ||
982 | struct live_test t; | ||
983 | unsigned int id; | ||
984 | u64 vm_total; | ||
985 | int err; | ||
986 | |||
987 | if (INTEL_GEN(i915) < 7) | ||
988 | return 0; | ||
989 | |||
990 | /* | ||
991 | * The simple goal here is that a write into one context is not | ||
992 | * observed in a second (separate page tables and scratch). | ||
993 | */ | ||
994 | |||
995 | file = mock_file(i915); | ||
996 | if (IS_ERR(file)) | ||
997 | return PTR_ERR(file); | ||
998 | |||
999 | mutex_lock(&i915->drm.struct_mutex); | ||
1000 | |||
1001 | err = begin_live_test(&t, i915, __func__, ""); | ||
1002 | if (err) | ||
1003 | goto out_unlock; | ||
1004 | |||
1005 | ctx_a = i915_gem_create_context(i915, file->driver_priv); | ||
1006 | if (IS_ERR(ctx_a)) { | ||
1007 | err = PTR_ERR(ctx_a); | ||
1008 | goto out_unlock; | ||
1009 | } | ||
1010 | |||
1011 | ctx_b = i915_gem_create_context(i915, file->driver_priv); | ||
1012 | if (IS_ERR(ctx_b)) { | ||
1013 | err = PTR_ERR(ctx_b); | ||
1014 | goto out_unlock; | ||
1015 | } | ||
1016 | |||
1017 | /* We can only test vm isolation if the vms are distinct */ ||
1018 | if (ctx_a->ppgtt == ctx_b->ppgtt) | ||
1019 | goto out_unlock; | ||
1020 | |||
1021 | vm_total = ctx_a->ppgtt->vm.total; | ||
1022 | GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total); | ||
1023 | vm_total -= I915_GTT_PAGE_SIZE; | ||
1024 | |||
1025 | intel_runtime_pm_get(i915); | ||
1026 | |||
1027 | count = 0; | ||
1028 | for_each_engine(engine, i915, id) { | ||
1029 | IGT_TIMEOUT(end_time); | ||
1030 | unsigned long this = 0; | ||
1031 | |||
1032 | if (!intel_engine_can_store_dword(engine)) | ||
1033 | continue; | ||
1034 | |||
1035 | while (!__igt_timeout(end_time, NULL)) { | ||
1036 | u32 value = 0xc5c5c5c5; | ||
1037 | u64 offset; | ||
1038 | |||
1039 | div64_u64_rem(i915_prandom_u64_state(&prng), | ||
1040 | vm_total, &offset); | ||
1041 | offset &= ~sizeof(u32); | ||
1042 | offset += I915_GTT_PAGE_SIZE; | ||
1043 | |||
1044 | err = write_to_scratch(ctx_a, engine, | ||
1045 | offset, 0xdeadbeef); | ||
1046 | if (err == 0) | ||
1047 | err = read_from_scratch(ctx_b, engine, | ||
1048 | offset, &value); | ||
1049 | if (err) | ||
1050 | goto out_rpm; | ||
1051 | |||
1052 | if (value) { | ||
1053 | pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n", | ||
1054 | engine->name, value, | ||
1055 | upper_32_bits(offset), | ||
1056 | lower_32_bits(offset), | ||
1057 | this); | ||
1058 | err = -EINVAL; | ||
1059 | goto out_rpm; | ||
1060 | } | ||
1061 | |||
1062 | this++; | ||
1063 | } | ||
1064 | count += this; | ||
1065 | } | ||
1066 | pr_info("Checked %lu scratch offsets across %d engines\n", | ||
1067 | count, INTEL_INFO(i915)->num_rings); | ||
1068 | |||
1069 | out_rpm: | ||
1070 | intel_runtime_pm_put(i915); | ||
1071 | out_unlock: | ||
1072 | if (end_live_test(&t)) | ||
731 | err = -EIO; | 1073 | err = -EIO; |
732 | mutex_unlock(&i915->drm.struct_mutex); | 1074 | mutex_unlock(&i915->drm.struct_mutex); |
733 | 1075 | ||
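igt_vm_isolation() above probes randomly chosen addresses. Assuming i915_prandom_u64_state() returns a uniform 64-bit value, the selection works as sketched below; note the masking step as written clears only bit 2:

    /* Sketch of the offset selection above (assumed uniform PRNG):
     *   div64_u64_rem(rand64, vm_total, &offset);   offset in [0, vm_total)
     *   offset &= ~sizeof(u32);    clears bit 2 only; if full u32
     *                              alignment is the intent, the usual
     *                              spelling is round_down(offset, sizeof(u32))
     *   offset += I915_GTT_PAGE_SIZE;   keep probes clear of the scratch
     *                                   page at 0 (vm_total was already
     *                                   shrunk by one page to compensate)
     */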
@@ -865,33 +1207,6 @@ out_unlock: | |||
865 | return err; | 1207 | return err; |
866 | } | 1208 | } |
867 | 1209 | ||
868 | static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915) | ||
869 | { | ||
870 | struct drm_i915_gem_object *obj; | ||
871 | int err; | ||
872 | |||
873 | err = i915_gem_init_aliasing_ppgtt(i915); | ||
874 | if (err) | ||
875 | return err; | ||
876 | |||
877 | list_for_each_entry(obj, &i915->mm.bound_list, mm.link) { | ||
878 | struct i915_vma *vma; | ||
879 | |||
880 | vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); | ||
881 | if (IS_ERR(vma)) | ||
882 | continue; | ||
883 | |||
884 | vma->flags &= ~I915_VMA_LOCAL_BIND; | ||
885 | } | ||
886 | |||
887 | return 0; | ||
888 | } | ||
889 | |||
890 | static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915) | ||
891 | { | ||
892 | i915_gem_fini_aliasing_ppgtt(i915); | ||
893 | } | ||
894 | |||
895 | int i915_gem_context_mock_selftests(void) | 1210 | int i915_gem_context_mock_selftests(void) |
896 | { | 1211 | { |
897 | static const struct i915_subtest tests[] = { | 1212 | static const struct i915_subtest tests[] = { |
@@ -917,32 +1232,11 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv) | |||
917 | SUBTEST(live_nop_switch), | 1232 | SUBTEST(live_nop_switch), |
918 | SUBTEST(igt_ctx_exec), | 1233 | SUBTEST(igt_ctx_exec), |
919 | SUBTEST(igt_ctx_readonly), | 1234 | SUBTEST(igt_ctx_readonly), |
1235 | SUBTEST(igt_vm_isolation), | ||
920 | }; | 1236 | }; |
921 | bool fake_alias = false; | ||
922 | int err; | ||
923 | 1237 | ||
924 | if (i915_terminally_wedged(&dev_priv->gpu_error)) | 1238 | if (i915_terminally_wedged(&dev_priv->gpu_error)) |
925 | return 0; | 1239 | return 0; |
926 | 1240 | ||
927 | /* Install a fake aliasing gtt for exercise */ | 1241 | return i915_subtests(tests, dev_priv); |
928 | if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) { | ||
929 | mutex_lock(&dev_priv->drm.struct_mutex); | ||
930 | err = fake_aliasing_ppgtt_enable(dev_priv); | ||
931 | mutex_unlock(&dev_priv->drm.struct_mutex); | ||
932 | if (err) | ||
933 | return err; | ||
934 | |||
935 | GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt); | ||
936 | fake_alias = true; | ||
937 | } | ||
938 | |||
939 | err = i915_subtests(tests, dev_priv); | ||
940 | |||
941 | if (fake_alias) { | ||
942 | mutex_lock(&dev_priv->drm.struct_mutex); | ||
943 | fake_aliasing_ppgtt_disable(dev_priv); | ||
944 | mutex_unlock(&dev_priv->drm.struct_mutex); | ||
945 | } | ||
946 | |||
947 | return err; | ||
948 | } | 1242 | } |
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 128ad1cf0647..4365979d8222 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c | |||
@@ -351,7 +351,7 @@ static int igt_evict_contexts(void *arg) | |||
351 | * where the GTT space of the request is separate from the GGTT | 351 | * where the GTT space of the request is separate from the GGTT |
352 | * allocation required to build the request. | 352 | * allocation required to build the request. |
353 | */ | 353 | */ |
354 | if (!USES_FULL_PPGTT(i915)) | 354 | if (!HAS_FULL_PPGTT(i915)) |
355 | return 0; | 355 | return 0; |
356 | 356 | ||
357 | mutex_lock(&i915->drm.struct_mutex); | 357 | mutex_lock(&i915->drm.struct_mutex); |
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 8e2e269db97e..69fe86b30fbb 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | |||
@@ -153,7 +153,7 @@ static int igt_ppgtt_alloc(void *arg) | |||
153 | 153 | ||
154 | /* Allocate a ppgtt and try to fill the entire range */ | 154 | /* Allocate a ppgtt and try to fill the entire range */ |
155 | 155 | ||
156 | if (!USES_PPGTT(dev_priv)) | 156 | if (!HAS_PPGTT(dev_priv)) |
157 | return 0; | 157 | return 0; |
158 | 158 | ||
159 | ppgtt = __hw_ppgtt_create(dev_priv); | 159 | ppgtt = __hw_ppgtt_create(dev_priv); |
@@ -1001,7 +1001,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, | |||
1001 | IGT_TIMEOUT(end_time); | 1001 | IGT_TIMEOUT(end_time); |
1002 | int err; | 1002 | int err; |
1003 | 1003 | ||
1004 | if (!USES_FULL_PPGTT(dev_priv)) | 1004 | if (!HAS_FULL_PPGTT(dev_priv)) |
1005 | return 0; | 1005 | return 0; |
1006 | 1006 | ||
1007 | file = mock_file(dev_priv); | 1007 | file = mock_file(dev_priv); |
@@ -1337,7 +1337,7 @@ static int igt_gtt_reserve(void *arg) | |||
1337 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); | 1337 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); |
1338 | if (vma->node.start != total || | 1338 | if (vma->node.start != total || |
1339 | vma->node.size != 2*I915_GTT_PAGE_SIZE) { | 1339 | vma->node.size != 2*I915_GTT_PAGE_SIZE) { |
1340 | pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", | 1340 | pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n", |
1341 | vma->node.start, vma->node.size, | 1341 | vma->node.start, vma->node.size, |
1342 | total, 2*I915_GTT_PAGE_SIZE); | 1342 | total, 2*I915_GTT_PAGE_SIZE); |
1343 | err = -EINVAL; | 1343 | err = -EINVAL; |
@@ -1386,7 +1386,7 @@ static int igt_gtt_reserve(void *arg) | |||
1386 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); | 1386 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); |
1387 | if (vma->node.start != total || | 1387 | if (vma->node.start != total || |
1388 | vma->node.size != 2*I915_GTT_PAGE_SIZE) { | 1388 | vma->node.size != 2*I915_GTT_PAGE_SIZE) { |
1389 | pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", | 1389 | pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n", |
1390 | vma->node.start, vma->node.size, | 1390 | vma->node.start, vma->node.size, |
1391 | total, 2*I915_GTT_PAGE_SIZE); | 1391 | total, 2*I915_GTT_PAGE_SIZE); |
1392 | err = -EINVAL; | 1392 | err = -EINVAL; |
@@ -1430,7 +1430,7 @@ static int igt_gtt_reserve(void *arg) | |||
1430 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); | 1430 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); |
1431 | if (vma->node.start != offset || | 1431 | if (vma->node.start != offset || |
1432 | vma->node.size != 2*I915_GTT_PAGE_SIZE) { | 1432 | vma->node.size != 2*I915_GTT_PAGE_SIZE) { |
1433 | pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n", | 1433 | pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n", |
1434 | vma->node.start, vma->node.size, | 1434 | vma->node.start, vma->node.size, |
1435 | offset, 2*I915_GTT_PAGE_SIZE); | 1435 | offset, 2*I915_GTT_PAGE_SIZE); |
1436 | err = -EINVAL; | 1436 | err = -EINVAL; |
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c index 0c0ab82b6228..32cba4cae31a 100644 --- a/drivers/gpu/drm/i915/selftests/intel_guc.c +++ b/drivers/gpu/drm/i915/selftests/intel_guc.c | |||
@@ -159,6 +159,7 @@ static int igt_guc_clients(void *args) | |||
159 | * Get rid of clients created during driver load because the test will | 159 | * Get rid of clients created during driver load because the test will |
160 | * recreate them. | 160 | * recreate them. |
161 | */ | 161 | */ |
162 | guc_clients_disable(guc); | ||
162 | guc_clients_destroy(guc); | 163 | guc_clients_destroy(guc); |
163 | if (guc->execbuf_client || guc->preempt_client) { | 164 | if (guc->execbuf_client || guc->preempt_client) { |
164 | pr_err("guc_clients_destroy lied!\n"); | 165 | pr_err("guc_clients_destroy lied!\n"); |
@@ -197,8 +198,8 @@ static int igt_guc_clients(void *args) | |||
197 | goto out; | 198 | goto out; |
198 | } | 199 | } |
199 | 200 | ||
200 | /* Now create the doorbells */ | 201 | /* Now enable the clients */ |
201 | guc_clients_doorbell_init(guc); | 202 | guc_clients_enable(guc); |
202 | 203 | ||
203 | /* each client should now have received a doorbell */ | 204 | /* each client should now have received a doorbell */ |
204 | if (!client_doorbell_in_sync(guc->execbuf_client) || | 205 | if (!client_doorbell_in_sync(guc->execbuf_client) || |
@@ -212,63 +213,17 @@ static int igt_guc_clients(void *args) | |||
212 | * Basic test - an attempt to reallocate a valid doorbell to the | 213 | * Basic test - an attempt to reallocate a valid doorbell to the |
213 | * client it is currently assigned should not cause a failure. | 214 | * client it is currently assigned should not cause a failure. |
214 | */ | 215 | */ |
215 | err = guc_clients_doorbell_init(guc); | ||
216 | if (err) | ||
217 | goto out; | ||
218 | |||
219 | /* | ||
220 | * Negative test - a client with no doorbell (invalid db id). | ||
221 | * After destroying the doorbell, the db id is changed to | ||
222 | * GUC_DOORBELL_INVALID and the firmware will reject any attempt to | ||
223 | * allocate a doorbell with an invalid id (db has to be reserved before | ||
224 | * allocation). | ||
225 | */ | ||
226 | destroy_doorbell(guc->execbuf_client); | ||
227 | if (client_doorbell_in_sync(guc->execbuf_client)) { | ||
228 | pr_err("destroy db did not work\n"); | ||
229 | err = -EINVAL; | ||
230 | goto out; | ||
231 | } | ||
232 | |||
233 | unreserve_doorbell(guc->execbuf_client); | ||
234 | |||
235 | __create_doorbell(guc->execbuf_client); | ||
236 | err = __guc_allocate_doorbell(guc, guc->execbuf_client->stage_id); | ||
237 | if (err != -EIO) { | ||
238 | pr_err("unexpected (err = %d)", err); | ||
239 | goto out_db; | ||
240 | } | ||
241 | |||
242 | if (!available_dbs(guc, guc->execbuf_client->priority)) { | ||
243 | pr_err("doorbell not available when it should\n"); | ||
244 | err = -EIO; | ||
245 | goto out_db; | ||
246 | } | ||
247 | |||
248 | out_db: | ||
249 | /* clean after test */ | ||
250 | __destroy_doorbell(guc->execbuf_client); | ||
251 | err = reserve_doorbell(guc->execbuf_client); | ||
252 | if (err) { | ||
253 | pr_err("failed to reserve back the doorbell back\n"); | ||
254 | } | ||
255 | err = create_doorbell(guc->execbuf_client); | 216 | err = create_doorbell(guc->execbuf_client); |
256 | if (err) { | ||
257 | pr_err("recreate doorbell failed\n"); | ||
258 | goto out; | ||
259 | } | ||
260 | 217 | ||
261 | out: | 218 | out: |
262 | /* | 219 | /* |
263 | * Leave a clean state for other tests, plus the driver always destroys the | 220 | * Leave a clean state for other tests, plus the driver always destroys the ||
264 | * clients during unload. | 221 | * clients during unload. |
265 | */ | 222 | */ |
266 | destroy_doorbell(guc->execbuf_client); | 223 | guc_clients_disable(guc); |
267 | if (guc->preempt_client) | ||
268 | destroy_doorbell(guc->preempt_client); | ||
269 | guc_clients_destroy(guc); | 224 | guc_clients_destroy(guc); |
270 | guc_clients_create(guc); | 225 | guc_clients_create(guc); |
271 | guc_clients_doorbell_init(guc); | 226 | guc_clients_enable(guc); |
272 | unlock: | 227 | unlock: |
273 | intel_runtime_pm_put(dev_priv); | 228 | intel_runtime_pm_put(dev_priv); |
274 | mutex_unlock(&dev_priv->drm.struct_mutex); | 229 | mutex_unlock(&dev_priv->drm.struct_mutex); |
@@ -352,7 +307,7 @@ static int igt_guc_doorbells(void *arg) | |||
352 | 307 | ||
353 | db_id = clients[i]->doorbell_id; | 308 | db_id = clients[i]->doorbell_id; |
354 | 309 | ||
355 | err = create_doorbell(clients[i]); | 310 | err = __guc_client_enable(clients[i]); |
356 | if (err) { | 311 | if (err) { |
357 | pr_err("[%d] Failed to create a doorbell\n", i); | 312 | pr_err("[%d] Failed to create a doorbell\n", i); |
358 | goto out; | 313 | goto out; |
@@ -378,7 +333,7 @@ static int igt_guc_doorbells(void *arg) | |||
378 | out: | 333 | out: |
379 | for (i = 0; i < ATTEMPTS; i++) | 334 | for (i = 0; i < ATTEMPTS; i++) |
380 | if (!IS_ERR_OR_NULL(clients[i])) { | 335 | if (!IS_ERR_OR_NULL(clients[i])) { |
381 | destroy_doorbell(clients[i]); | 336 | __guc_client_disable(clients[i]); |
382 | guc_client_free(clients[i]); | 337 | guc_client_free(clients[i]); |
383 | } | 338 | } |
384 | unlock: | 339 | unlock: |
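The selftest edits above follow a rename of the GuC client lifecycle API; the mapping, read off the hunks themselves (the new function bodies are not part of this diff), is:

    /* old selftest call               ->  new call
     * guc_clients_doorbell_init(guc)  ->  guc_clients_enable(guc)
     * create_doorbell(client)         ->  __guc_client_enable(client)
     * destroy_doorbell(client)        ->  __guc_client_disable(client)
     * and guc_clients_disable(guc) is now paired before
     * guc_clients_destroy(guc) on both setup and teardown.
     */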
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index db378226ac10..defe671130ab 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c | |||
@@ -76,7 +76,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915) | |||
76 | h->seqno = memset(vaddr, 0xff, PAGE_SIZE); | 76 | h->seqno = memset(vaddr, 0xff, PAGE_SIZE); |
77 | 77 | ||
78 | vaddr = i915_gem_object_pin_map(h->obj, | 78 | vaddr = i915_gem_object_pin_map(h->obj, |
79 | HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC); | 79 | i915_coherent_map_type(i915)); |
80 | if (IS_ERR(vaddr)) { | 80 | if (IS_ERR(vaddr)) { |
81 | err = PTR_ERR(vaddr); | 81 | err = PTR_ERR(vaddr); |
82 | goto err_unpin_hws; | 82 | goto err_unpin_hws; |
@@ -234,7 +234,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) | |||
234 | return ERR_CAST(obj); | 234 | return ERR_CAST(obj); |
235 | 235 | ||
236 | vaddr = i915_gem_object_pin_map(obj, | 236 | vaddr = i915_gem_object_pin_map(obj, |
237 | HAS_LLC(h->i915) ? I915_MAP_WB : I915_MAP_WC); | 237 | i915_coherent_map_type(h->i915)); |
238 | if (IS_ERR(vaddr)) { | 238 | if (IS_ERR(vaddr)) { |
239 | i915_gem_object_put(obj); | 239 | i915_gem_object_put(obj); |
240 | return ERR_CAST(vaddr); | 240 | return ERR_CAST(vaddr); |
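Both hunks above replace an open-coded HAS_LLC() ternary with i915_coherent_map_type(). A plausible definition, inferred directly from the expression it replaces (the real helper lives in the i915 headers):

    static inline enum i915_map_type
    i915_coherent_map_type(struct drm_i915_private *i915)
    {
            /* write-back mappings are coherent only behind an LLC */
            return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
    }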
@@ -1150,6 +1150,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, | |||
1150 | tsk = NULL; | 1150 | tsk = NULL; |
1151 | goto out_reset; | 1151 | goto out_reset; |
1152 | } | 1152 | } |
1153 | get_task_struct(tsk); | ||
1153 | 1154 | ||
1154 | wait_for_completion(&arg.completion); | 1155 | wait_for_completion(&arg.completion); |
1155 | 1156 | ||
@@ -1172,6 +1173,8 @@ out_reset: | |||
1172 | /* The reset, even indirectly, should take less than 10ms. */ | 1173 | /* The reset, even indirectly, should take less than 10ms. */ |
1173 | igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/) | 1174 | igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/) |
1174 | err = kthread_stop(tsk); | 1175 | err = kthread_stop(tsk); |
1176 | |||
1177 | put_task_struct(tsk); | ||
1175 | } | 1178 | } |
1176 | 1179 | ||
1177 | mutex_lock(&i915->drm.struct_mutex); | 1180 | mutex_lock(&i915->drm.struct_mutex); |
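The get_task_struct()/put_task_struct() pair added above pins the kthread across the wait. The rationale, inferred from the change rather than stated in it:

    /* kthread_stop(tsk) dereferences the task, but the spawned thread
     * may exit and drop its final reference while we block in
     * wait_for_completion().  Taking a reference right after the
     * thread is created, and dropping it only after kthread_stop(),
     * keeps tsk valid for the whole window.
     */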
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c index 1aea7a8f2224..94fc0e5c8766 100644 --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c | |||
@@ -6,6 +6,7 @@ | |||
6 | 6 | ||
7 | #include "../i915_selftest.h" | 7 | #include "../i915_selftest.h" |
8 | #include "igt_flush_test.h" | 8 | #include "igt_flush_test.h" |
9 | #include "i915_random.h" | ||
9 | 10 | ||
10 | #include "mock_context.h" | 11 | #include "mock_context.h" |
11 | 12 | ||
@@ -48,7 +49,7 @@ static int spinner_init(struct spinner *spin, struct drm_i915_private *i915) | |||
48 | } | 49 | } |
49 | spin->seqno = memset(vaddr, 0xff, PAGE_SIZE); | 50 | spin->seqno = memset(vaddr, 0xff, PAGE_SIZE); |
50 | 51 | ||
51 | mode = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC; | 52 | mode = i915_coherent_map_type(i915); |
52 | vaddr = i915_gem_object_pin_map(spin->obj, mode); | 53 | vaddr = i915_gem_object_pin_map(spin->obj, mode); |
53 | if (IS_ERR(vaddr)) { | 54 | if (IS_ERR(vaddr)) { |
54 | err = PTR_ERR(vaddr); | 55 | err = PTR_ERR(vaddr); |
@@ -291,12 +292,14 @@ static int live_preempt(void *arg) | |||
291 | ctx_hi = kernel_context(i915); | 292 | ctx_hi = kernel_context(i915); |
292 | if (!ctx_hi) | 293 | if (!ctx_hi) |
293 | goto err_spin_lo; | 294 | goto err_spin_lo; |
294 | ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY; | 295 | ctx_hi->sched.priority = |
296 | I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); | ||
295 | 297 | ||
296 | ctx_lo = kernel_context(i915); | 298 | ctx_lo = kernel_context(i915); |
297 | if (!ctx_lo) | 299 | if (!ctx_lo) |
298 | goto err_ctx_hi; | 300 | goto err_ctx_hi; |
299 | ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY; | 301 | ctx_lo->sched.priority = |
302 | I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); | ||
300 | 303 | ||
301 | for_each_engine(engine, i915, id) { | 304 | for_each_engine(engine, i915, id) { |
302 | struct i915_request *rq; | 305 | struct i915_request *rq; |
@@ -417,7 +420,7 @@ static int live_late_preempt(void *arg) | |||
417 | goto err_wedged; | 420 | goto err_wedged; |
418 | } | 421 | } |
419 | 422 | ||
420 | attr.priority = I915_PRIORITY_MAX; | 423 | attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX); |
421 | engine->schedule(rq, &attr); | 424 | engine->schedule(rq, &attr); |
422 | 425 | ||
423 | if (!wait_for_spinner(&spin_hi, rq)) { | 426 | if (!wait_for_spinner(&spin_hi, rq)) { |
@@ -573,6 +576,261 @@ err_unlock: | |||
573 | return err; | 576 | return err; |
574 | } | 577 | } |
575 | 578 | ||
579 | static int random_range(struct rnd_state *rnd, int min, int max) | ||
580 | { | ||
581 | return i915_prandom_u32_max_state(max - min, rnd) + min; | ||
582 | } | ||
583 | |||
584 | static int random_priority(struct rnd_state *rnd) | ||
585 | { | ||
586 | return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX); | ||
587 | } | ||
588 | |||
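Assuming i915_prandom_u32_max_state(ep, rnd) draws uniformly from [0, ep), like its prandom_u32_max() counterpart, the two helpers above behave as follows:

    /* random_range(rnd, min, max) is uniform over the half-open
     * interval [min, max):
     *   i915_prandom_u32_max_state(max - min, rnd)  ->  [0, max - min)
     *   ... + min                                   ->  [min, max)
     * so random_priority() can return any priority from
     * I915_PRIORITY_MIN up to, but never including, I915_PRIORITY_MAX.
     */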
589 | struct preempt_smoke { | ||
590 | struct drm_i915_private *i915; | ||
591 | struct i915_gem_context **contexts; | ||
592 | struct intel_engine_cs *engine; | ||
593 | struct drm_i915_gem_object *batch; | ||
594 | unsigned int ncontext; | ||
595 | struct rnd_state prng; | ||
596 | unsigned long count; | ||
597 | }; | ||
598 | |||
599 | static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke) | ||
600 | { | ||
601 | return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext, | ||
602 | &smoke->prng)]; | ||
603 | } | ||
604 | |||
605 | static int smoke_submit(struct preempt_smoke *smoke, | ||
606 | struct i915_gem_context *ctx, int prio, | ||
607 | struct drm_i915_gem_object *batch) | ||
608 | { | ||
609 | struct i915_request *rq; | ||
610 | struct i915_vma *vma = NULL; | ||
611 | int err = 0; | ||
612 | |||
613 | if (batch) { | ||
614 | vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL); | ||
615 | if (IS_ERR(vma)) | ||
616 | return PTR_ERR(vma); | ||
617 | |||
618 | err = i915_vma_pin(vma, 0, 0, PIN_USER); | ||
619 | if (err) | ||
620 | return err; | ||
621 | } | ||
622 | |||
623 | ctx->sched.priority = prio; | ||
624 | |||
625 | rq = i915_request_alloc(smoke->engine, ctx); | ||
626 | if (IS_ERR(rq)) { | ||
627 | err = PTR_ERR(rq); | ||
628 | goto unpin; | ||
629 | } | ||
630 | |||
631 | if (vma) { | ||
632 | err = rq->engine->emit_bb_start(rq, | ||
633 | vma->node.start, | ||
634 | PAGE_SIZE, 0); | ||
635 | if (!err) | ||
636 | err = i915_vma_move_to_active(vma, rq, 0); | ||
637 | } | ||
638 | |||
639 | i915_request_add(rq); | ||
640 | |||
641 | unpin: | ||
642 | if (vma) | ||
643 | i915_vma_unpin(vma); | ||
644 | |||
645 | return err; | ||
646 | } | ||
647 | |||
648 | static int smoke_crescendo_thread(void *arg) | ||
649 | { | ||
650 | struct preempt_smoke *smoke = arg; | ||
651 | IGT_TIMEOUT(end_time); | ||
652 | unsigned long count; | ||
653 | |||
654 | count = 0; | ||
655 | do { | ||
656 | struct i915_gem_context *ctx = smoke_context(smoke); | ||
657 | int err; | ||
658 | |||
659 | mutex_lock(&smoke->i915->drm.struct_mutex); | ||
660 | err = smoke_submit(smoke, | ||
661 | ctx, count % I915_PRIORITY_MAX, | ||
662 | smoke->batch); | ||
663 | mutex_unlock(&smoke->i915->drm.struct_mutex); | ||
664 | if (err) | ||
665 | return err; | ||
666 | |||
667 | count++; | ||
668 | } while (!__igt_timeout(end_time, NULL)); | ||
669 | |||
670 | smoke->count = count; | ||
671 | return 0; | ||
672 | } | ||
673 | |||
674 | static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags) | ||
675 | #define BATCH BIT(0) | ||
676 | { | ||
677 | struct task_struct *tsk[I915_NUM_ENGINES] = {}; | ||
678 | struct preempt_smoke arg[I915_NUM_ENGINES]; | ||
679 | struct intel_engine_cs *engine; | ||
680 | enum intel_engine_id id; | ||
681 | unsigned long count; | ||
682 | int err = 0; | ||
683 | |||
684 | mutex_unlock(&smoke->i915->drm.struct_mutex); | ||
685 | |||
686 | for_each_engine(engine, smoke->i915, id) { | ||
687 | arg[id] = *smoke; | ||
688 | arg[id].engine = engine; | ||
689 | if (!(flags & BATCH)) | ||
690 | arg[id].batch = NULL; | ||
691 | arg[id].count = 0; | ||
692 | |||
693 | tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id], ||
694 | "igt/smoke:%d", id); | ||
695 | if (IS_ERR(tsk[id])) { | ||
696 | err = PTR_ERR(tsk[id]); | ||
697 | break; | ||
698 | } | ||
699 | get_task_struct(tsk[id]); | ||
700 | } | ||
701 | |||
702 | count = 0; | ||
703 | for_each_engine(engine, smoke->i915, id) { | ||
704 | int status; | ||
705 | |||
706 | if (IS_ERR_OR_NULL(tsk[id])) | ||
707 | continue; | ||
708 | |||
709 | status = kthread_stop(tsk[id]); | ||
710 | if (status && !err) | ||
711 | err = status; | ||
712 | |||
713 | count += arg[id].count; | ||
714 | |||
715 | put_task_struct(tsk[id]); | ||
716 | } | ||
717 | |||
718 | mutex_lock(&smoke->i915->drm.struct_mutex); | ||
719 | |||
720 | pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n", | ||
721 | count, flags, | ||
722 | INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext); | ||
723 | return err; ||
724 | } | ||
725 | |||
726 | static int smoke_random(struct preempt_smoke *smoke, unsigned int flags) | ||
727 | { | ||
728 | enum intel_engine_id id; | ||
729 | IGT_TIMEOUT(end_time); | ||
730 | unsigned long count; | ||
731 | |||
732 | count = 0; | ||
733 | do { | ||
734 | for_each_engine(smoke->engine, smoke->i915, id) { | ||
735 | struct i915_gem_context *ctx = smoke_context(smoke); | ||
736 | int err; | ||
737 | |||
738 | err = smoke_submit(smoke, | ||
739 | ctx, random_priority(&smoke->prng), | ||
740 | flags & BATCH ? smoke->batch : NULL); | ||
741 | if (err) | ||
742 | return err; | ||
743 | |||
744 | count++; | ||
745 | } | ||
746 | } while (!__igt_timeout(end_time, NULL)); | ||
747 | |||
748 | pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n", | ||
749 | count, flags, | ||
750 | INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext); | ||
751 | return 0; | ||
752 | } | ||
753 | |||
754 | static int live_preempt_smoke(void *arg) | ||
755 | { | ||
756 | struct preempt_smoke smoke = { | ||
757 | .i915 = arg, | ||
758 | .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed), | ||
759 | .ncontext = 1024, | ||
760 | }; | ||
761 | const unsigned int phase[] = { 0, BATCH }; | ||
762 | int err = -ENOMEM; | ||
763 | u32 *cs; | ||
764 | int n; | ||
765 | |||
766 | if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915)) | ||
767 | return 0; | ||
768 | |||
769 | smoke.contexts = kmalloc_array(smoke.ncontext, | ||
770 | sizeof(*smoke.contexts), | ||
771 | GFP_KERNEL); | ||
772 | if (!smoke.contexts) | ||
773 | return -ENOMEM; | ||
774 | |||
775 | mutex_lock(&smoke.i915->drm.struct_mutex); | ||
776 | intel_runtime_pm_get(smoke.i915); | ||
777 | |||
778 | smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE); | ||
779 | if (IS_ERR(smoke.batch)) { | ||
780 | err = PTR_ERR(smoke.batch); | ||
781 | goto err_unlock; | ||
782 | } | ||
783 | |||
784 | cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB); | ||
785 | if (IS_ERR(cs)) { | ||
786 | err = PTR_ERR(cs); | ||
787 | goto err_batch; | ||
788 | } | ||
789 | for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++) | ||
790 | cs[n] = MI_ARB_CHECK; | ||
791 | cs[n] = MI_BATCH_BUFFER_END; | ||
792 | i915_gem_object_unpin_map(smoke.batch); | ||
793 | |||
794 | err = i915_gem_object_set_to_gtt_domain(smoke.batch, false); | ||
795 | if (err) | ||
796 | goto err_batch; | ||
797 | |||
798 | for (n = 0; n < smoke.ncontext; n++) { | ||
799 | smoke.contexts[n] = kernel_context(smoke.i915); | ||
800 | if (!smoke.contexts[n]) | ||
801 | goto err_ctx; | ||
802 | } | ||
803 | |||
804 | for (n = 0; n < ARRAY_SIZE(phase); n++) { | ||
805 | err = smoke_crescendo(&smoke, phase[n]); | ||
806 | if (err) | ||
807 | goto err_ctx; | ||
808 | |||
809 | err = smoke_random(&smoke, phase[n]); | ||
810 | if (err) | ||
811 | goto err_ctx; | ||
812 | } | ||
813 | |||
814 | err_ctx: | ||
815 | if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED)) | ||
816 | err = -EIO; | ||
817 | |||
818 | for (n = 0; n < smoke.ncontext; n++) { | ||
819 | if (!smoke.contexts[n]) | ||
820 | break; | ||
821 | kernel_context_close(smoke.contexts[n]); | ||
822 | } | ||
823 | |||
824 | err_batch: | ||
825 | i915_gem_object_put(smoke.batch); | ||
826 | err_unlock: | ||
827 | intel_runtime_pm_put(smoke.i915); | ||
828 | mutex_unlock(&smoke.i915->drm.struct_mutex); | ||
829 | kfree(smoke.contexts); | ||
830 | |||
831 | return err; | ||
832 | } | ||
833 | |||
576 | int intel_execlists_live_selftests(struct drm_i915_private *i915) | 834 | int intel_execlists_live_selftests(struct drm_i915_private *i915) |
577 | { | 835 | { |
578 | static const struct i915_subtest tests[] = { | 836 | static const struct i915_subtest tests[] = { |
@@ -580,6 +838,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) | |||
580 | SUBTEST(live_preempt), | 838 | SUBTEST(live_preempt), |
581 | SUBTEST(live_late_preempt), | 839 | SUBTEST(live_late_preempt), |
582 | SUBTEST(live_preempt_hang), | 840 | SUBTEST(live_preempt_hang), |
841 | SUBTEST(live_preempt_smoke), | ||
583 | }; | 842 | }; |
584 | 843 | ||
585 | if (!HAS_EXECLISTS(i915)) | 844 | if (!HAS_EXECLISTS(i915)) |
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index 22a73da45ad5..d0c44c18db42 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c | |||
@@ -200,7 +200,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, | |||
200 | engine->base.submit_request = mock_submit_request; | 200 | engine->base.submit_request = mock_submit_request; |
201 | 201 | ||
202 | i915_timeline_init(i915, &engine->base.timeline, engine->base.name); | 202 | i915_timeline_init(i915, &engine->base.timeline, engine->base.name); |
203 | lockdep_set_subclass(&engine->base.timeline.lock, TIMELINE_ENGINE); | 203 | i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE); |
204 | 204 | ||
205 | intel_engine_init_breadcrumbs(&engine->base); | 205 | intel_engine_init_breadcrumbs(&engine->base); |
206 | engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */ | 206 | engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */ |
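A plausible shape for the wrapper introduced above, inferred from the lockdep call it replaces; the real definition lives in i915_timeline.h and may do extra lockdep bookkeeping:

    static inline void
    i915_timeline_set_subclass(struct i915_timeline *timeline,
                               unsigned int subclass)
    {
            lockdep_set_subclass(&timeline->lock, subclass);
    }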
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c index 435a2c35ee8c..361e962a7969 100644 --- a/drivers/gpu/drm/i915/vlv_dsi.c +++ b/drivers/gpu/drm/i915/vlv_dsi.c | |||
@@ -206,39 +206,6 @@ static const struct mipi_dsi_host_ops intel_dsi_host_ops = { | |||
206 | .transfer = intel_dsi_host_transfer, | 206 | .transfer = intel_dsi_host_transfer, |
207 | }; | 207 | }; |
208 | 208 | ||
209 | static struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi, | ||
210 | enum port port) | ||
211 | { | ||
212 | struct intel_dsi_host *host; | ||
213 | struct mipi_dsi_device *device; | ||
214 | |||
215 | host = kzalloc(sizeof(*host), GFP_KERNEL); | ||
216 | if (!host) | ||
217 | return NULL; | ||
218 | |||
219 | host->base.ops = &intel_dsi_host_ops; | ||
220 | host->intel_dsi = intel_dsi; | ||
221 | host->port = port; | ||
222 | |||
223 | /* | ||
224 | * We should call mipi_dsi_host_register(&host->base) here, but we don't | ||
225 | * have a host->dev, and we don't have OF stuff either. So just use the | ||
226 | * dsi framework as a library and hope for the best. Create the dsi | ||
227 | * devices by ourselves here too. Need to be careful though, because we | ||
228 | * don't initialize any of the driver model devices here. | ||
229 | */ | ||
230 | device = kzalloc(sizeof(*device), GFP_KERNEL); | ||
231 | if (!device) { | ||
232 | kfree(host); | ||
233 | return NULL; | ||
234 | } | ||
235 | |||
236 | device->host = &host->base; | ||
237 | host->device = device; | ||
238 | |||
239 | return host; | ||
240 | } | ||
241 | |||
242 | /* | 209 | /* |
243 | * send a video mode command | 210 | * send a video mode command |
244 | * | 211 | * |
@@ -290,16 +257,6 @@ static void band_gap_reset(struct drm_i915_private *dev_priv) | |||
290 | mutex_unlock(&dev_priv->sb_lock); | 257 | mutex_unlock(&dev_priv->sb_lock); |
291 | } | 258 | } |
292 | 259 | ||
293 | static inline bool is_vid_mode(struct intel_dsi *intel_dsi) | ||
294 | { | ||
295 | return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE; | ||
296 | } | ||
297 | |||
298 | static inline bool is_cmd_mode(struct intel_dsi *intel_dsi) | ||
299 | { | ||
300 | return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE; | ||
301 | } | ||
302 | |||
303 | static bool intel_dsi_compute_config(struct intel_encoder *encoder, | 260 | static bool intel_dsi_compute_config(struct intel_encoder *encoder, |
304 | struct intel_crtc_state *pipe_config, | 261 | struct intel_crtc_state *pipe_config, |
305 | struct drm_connector_state *conn_state) | 262 | struct drm_connector_state *conn_state) |
@@ -314,6 +271,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, | |||
314 | int ret; | 271 | int ret; |
315 | 272 | ||
316 | DRM_DEBUG_KMS("\n"); | 273 | DRM_DEBUG_KMS("\n"); |
274 | pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; | ||
317 | 275 | ||
318 | if (fixed_mode) { | 276 | if (fixed_mode) { |
319 | intel_fixed_panel_mode(fixed_mode, adjusted_mode); | 277 | intel_fixed_panel_mode(fixed_mode, adjusted_mode); |
@@ -745,17 +703,6 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, | |||
745 | const struct intel_crtc_state *pipe_config); | 703 | const struct intel_crtc_state *pipe_config); |
746 | static void intel_dsi_unprepare(struct intel_encoder *encoder); | 704 | static void intel_dsi_unprepare(struct intel_encoder *encoder); |
747 | 705 | ||
748 | static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec) | ||
749 | { | ||
750 | struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); | ||
751 | |||
752 | /* For v3 VBTs in vid-mode the delays are part of the VBT sequences */ | ||
753 | if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3) | ||
754 | return; | ||
755 | |||
756 | msleep(msec); | ||
757 | } | ||
758 | |||
759 | /* | 706 | /* |
760 | * Panel enable/disable sequences from the VBT spec. | 707 | * Panel enable/disable sequences from the VBT spec. |
761 | * | 708 | * |
@@ -793,6 +740,10 @@ static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec) | |||
793 | * - wait t4 - wait t4 | 740 | * - wait t4 - wait t4 |
794 | */ | 741 | */ |
795 | 742 | ||
743 | /* | ||
744 | * DSI port enable has to be done before pipe and plane enable, so we do it in | ||
745 | * the pre_enable hook instead of the enable hook. | ||
746 | */ | ||
796 | static void intel_dsi_pre_enable(struct intel_encoder *encoder, | 747 | static void intel_dsi_pre_enable(struct intel_encoder *encoder, |
797 | const struct intel_crtc_state *pipe_config, | 748 | const struct intel_crtc_state *pipe_config, |
798 | const struct drm_connector_state *conn_state) | 749 | const struct drm_connector_state *conn_state) |
@@ -895,17 +846,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder, | |||
895 | } | 846 | } |
896 | 847 | ||
897 | /* | 848 | /* |
898 | * DSI port enable has to be done before pipe and plane enable, so we do it in | ||
899 | * the pre_enable hook. | ||
900 | */ | ||
901 | static void intel_dsi_enable_nop(struct intel_encoder *encoder, | ||
902 | const struct intel_crtc_state *pipe_config, | ||
903 | const struct drm_connector_state *conn_state) | ||
904 | { | ||
905 | DRM_DEBUG_KMS("\n"); | ||
906 | } | ||
907 | |||
908 | /* | ||
909 | * DSI port disable has to be done after pipe and plane disable, so we do it in | 849 | * DSI port disable has to be done after pipe and plane disable, so we do it in |
910 | * the post_disable hook. | 850 | * the post_disable hook. |
911 | */ | 851 | */ |
@@ -1272,31 +1212,6 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, | |||
1272 | } | 1212 | } |
1273 | } | 1213 | } |
1274 | 1214 | ||
1275 | static enum drm_mode_status | ||
1276 | intel_dsi_mode_valid(struct drm_connector *connector, | ||
1277 | struct drm_display_mode *mode) | ||
1278 | { | ||
1279 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
1280 | const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; | ||
1281 | int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; | ||
1282 | |||
1283 | DRM_DEBUG_KMS("\n"); | ||
1284 | |||
1285 | if (mode->flags & DRM_MODE_FLAG_DBLSCAN) | ||
1286 | return MODE_NO_DBLESCAN; | ||
1287 | |||
1288 | if (fixed_mode) { | ||
1289 | if (mode->hdisplay > fixed_mode->hdisplay) | ||
1290 | return MODE_PANEL; | ||
1291 | if (mode->vdisplay > fixed_mode->vdisplay) | ||
1292 | return MODE_PANEL; | ||
1293 | if (fixed_mode->clock > max_dotclk) | ||
1294 | return MODE_CLOCK_HIGH; | ||
1295 | } | ||
1296 | |||
1297 | return MODE_OK; | ||
1298 | } | ||
1299 | |||
1300 | /* return txclkesc cycles in terms of divider and duration in us */ | 1215 | /* return txclkesc cycles in terms of divider and duration in us */ |
1301 | static u16 txclkesc(u32 divider, unsigned int us) | 1216 | static u16 txclkesc(u32 divider, unsigned int us) |
1302 | { | 1217 | { |
@@ -1619,39 +1534,6 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder) | |||
1619 | } | 1534 | } |
1620 | } | 1535 | } |
1621 | 1536 | ||
1622 | static int intel_dsi_get_modes(struct drm_connector *connector) | ||
1623 | { | ||
1624 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
1625 | struct drm_display_mode *mode; | ||
1626 | |||
1627 | DRM_DEBUG_KMS("\n"); | ||
1628 | |||
1629 | if (!intel_connector->panel.fixed_mode) { | ||
1630 | DRM_DEBUG_KMS("no fixed mode\n"); | ||
1631 | return 0; | ||
1632 | } | ||
1633 | |||
1634 | mode = drm_mode_duplicate(connector->dev, | ||
1635 | intel_connector->panel.fixed_mode); | ||
1636 | if (!mode) { | ||
1637 | DRM_DEBUG_KMS("drm_mode_duplicate failed\n"); | ||
1638 | return 0; | ||
1639 | } | ||
1640 | |||
1641 | drm_mode_probed_add(connector, mode); | ||
1642 | return 1; | ||
1643 | } | ||
1644 | |||
1645 | static void intel_dsi_connector_destroy(struct drm_connector *connector) | ||
1646 | { | ||
1647 | struct intel_connector *intel_connector = to_intel_connector(connector); | ||
1648 | |||
1649 | DRM_DEBUG_KMS("\n"); | ||
1650 | intel_panel_fini(&intel_connector->panel); | ||
1651 | drm_connector_cleanup(connector); | ||
1652 | kfree(connector); | ||
1653 | } | ||
1654 | |||
1655 | static void intel_dsi_encoder_destroy(struct drm_encoder *encoder) | 1537 | static void intel_dsi_encoder_destroy(struct drm_encoder *encoder) |
1656 | { | 1538 | { |
1657 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); | 1539 | struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); |
@@ -1676,7 +1558,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs | |||
1676 | static const struct drm_connector_funcs intel_dsi_connector_funcs = { | 1558 | static const struct drm_connector_funcs intel_dsi_connector_funcs = { |
1677 | .late_register = intel_connector_register, | 1559 | .late_register = intel_connector_register, |
1678 | .early_unregister = intel_connector_unregister, | 1560 | .early_unregister = intel_connector_unregister, |
1679 | .destroy = intel_dsi_connector_destroy, | 1561 | .destroy = intel_connector_destroy, |
1680 | .fill_modes = drm_helper_probe_single_connector_modes, | 1562 | .fill_modes = drm_helper_probe_single_connector_modes, |
1681 | .atomic_get_property = intel_digital_connector_atomic_get_property, | 1563 | .atomic_get_property = intel_digital_connector_atomic_get_property, |
1682 | .atomic_set_property = intel_digital_connector_atomic_set_property, | 1564 | .atomic_set_property = intel_digital_connector_atomic_set_property, |
@@ -1684,27 +1566,57 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = { | |||
1684 | .atomic_duplicate_state = intel_digital_connector_duplicate_state, | 1566 | .atomic_duplicate_state = intel_digital_connector_duplicate_state, |
1685 | }; | 1567 | }; |
1686 | 1568 | ||
1687 | static int intel_dsi_get_panel_orientation(struct intel_connector *connector) | 1569 | static enum drm_panel_orientation |
1570 | vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector) | ||
1688 | { | 1571 | { |
1689 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | 1572 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
1690 | int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL; | 1573 | struct intel_encoder *encoder = connector->encoder; |
1691 | enum i9xx_plane_id i9xx_plane; | 1574 | enum intel_display_power_domain power_domain; |
1575 | enum drm_panel_orientation orientation; | ||
1576 | struct intel_plane *plane; | ||
1577 | struct intel_crtc *crtc; | ||
1578 | enum pipe pipe; | ||
1692 | u32 val; | 1579 | u32 val; |
1693 | 1580 | ||
1694 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | 1581 | if (!encoder->get_hw_state(encoder, &pipe)) |
1695 | if (connector->encoder->crtc_mask == BIT(PIPE_B)) | 1582 | return DRM_MODE_PANEL_ORIENTATION_UNKNOWN; |
1696 | i9xx_plane = PLANE_B; | ||
1697 | else | ||
1698 | i9xx_plane = PLANE_A; | ||
1699 | 1583 | ||
1700 | val = I915_READ(DSPCNTR(i9xx_plane)); | 1584 | crtc = intel_get_crtc_for_pipe(dev_priv, pipe); |
1701 | if (val & DISPPLANE_ROTATE_180) | 1585 | plane = to_intel_plane(crtc->base.primary); |
1702 | orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP; | 1586 | |
1703 | } | 1587 | power_domain = POWER_DOMAIN_PIPE(pipe); |
1588 | if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) | ||
1589 | return DRM_MODE_PANEL_ORIENTATION_UNKNOWN; | ||
1590 | |||
1591 | val = I915_READ(DSPCNTR(plane->i9xx_plane)); | ||
1592 | |||
1593 | if (!(val & DISPLAY_PLANE_ENABLE)) | ||
1594 | orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN; | ||
1595 | else if (val & DISPPLANE_ROTATE_180) | ||
1596 | orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP; | ||
1597 | else | ||
1598 | orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL; | ||
1599 | |||
1600 | intel_display_power_put(dev_priv, power_domain); | ||
1704 | 1601 | ||
1705 | return orientation; | 1602 | return orientation; |
1706 | } | 1603 | } |
1707 | 1604 | ||
1605 | static enum drm_panel_orientation | ||
1606 | vlv_dsi_get_panel_orientation(struct intel_connector *connector) | ||
1607 | { | ||
1608 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | ||
1609 | enum drm_panel_orientation orientation; | ||
1610 | |||
1611 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | ||
1612 | orientation = vlv_dsi_get_hw_panel_orientation(connector); | ||
1613 | if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) | ||
1614 | return orientation; | ||
1615 | } | ||
1616 | |||
1617 | return intel_dsi_get_panel_orientation(connector); | ||
1618 | } | ||
1619 | |||
1708 | static void intel_dsi_add_properties(struct intel_connector *connector) | 1620 | static void intel_dsi_add_properties(struct intel_connector *connector) |
1709 | { | 1621 | { |
1710 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); | 1622 | struct drm_i915_private *dev_priv = to_i915(connector->base.dev); |
@@ -1722,7 +1634,7 @@ static void intel_dsi_add_properties(struct intel_connector *connector) | |||
1722 | connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT; | 1634 | connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT; |
1723 | 1635 | ||
1724 | connector->base.display_info.panel_orientation = | 1636 | connector->base.display_info.panel_orientation = |
1725 | intel_dsi_get_panel_orientation(connector); | 1637 | vlv_dsi_get_panel_orientation(connector); |
1726 | drm_connector_init_panel_orientation_property( | 1638 | drm_connector_init_panel_orientation_property( |
1727 | &connector->base, | 1639 | &connector->base, |
1728 | connector->panel.fixed_mode->hdisplay, | 1640 | connector->panel.fixed_mode->hdisplay, |
@@ -1773,7 +1685,6 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) | |||
1773 | 1685 | ||
1774 | intel_encoder->compute_config = intel_dsi_compute_config; | 1686 | intel_encoder->compute_config = intel_dsi_compute_config; |
1775 | intel_encoder->pre_enable = intel_dsi_pre_enable; | 1687 | intel_encoder->pre_enable = intel_dsi_pre_enable; |
1776 | intel_encoder->enable = intel_dsi_enable_nop; | ||
1777 | intel_encoder->disable = intel_dsi_disable; | 1688 | intel_encoder->disable = intel_dsi_disable; |
1778 | intel_encoder->post_disable = intel_dsi_post_disable; | 1689 | intel_encoder->post_disable = intel_dsi_post_disable; |
1779 | intel_encoder->get_hw_state = intel_dsi_get_hw_state; | 1690 | intel_encoder->get_hw_state = intel_dsi_get_hw_state; |
@@ -1806,7 +1717,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) | |||
1806 | for_each_dsi_port(port, intel_dsi->ports) { | 1717 | for_each_dsi_port(port, intel_dsi->ports) { |
1807 | struct intel_dsi_host *host; | 1718 | struct intel_dsi_host *host; |
1808 | 1719 | ||
1809 | host = intel_dsi_host_init(intel_dsi, port); | 1720 | host = intel_dsi_host_init(intel_dsi, &intel_dsi_host_ops, |
1721 | port); | ||
1810 | if (!host) | 1722 | if (!host) |
1811 | goto err; | 1723 | goto err; |
1812 | 1724 | ||
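
The new vlv_dsi_get_hw_panel_orientation() above shows a recurring i915 pattern: query display hardware only if the relevant power well is already up, and drop the wakeref as soon as the read is done. Condensed from the hunk into a standalone helper (error handling trimmed; a sketch, not the driver code):

static u32 vlv_read_plane_ctl(struct drm_i915_private *dev_priv,
			      enum pipe pipe, enum i9xx_plane_id i9xx_plane)
{
	enum intel_display_power_domain domain = POWER_DOMAIN_PIPE(pipe);
	u32 val = 0;

	/* Query only: don't power the pipe up just to read a register. */
	if (!intel_display_power_get_if_enabled(dev_priv, domain))
		return 0;

	val = I915_READ(DSPCNTR(i9xx_plane));

	intel_display_power_put(dev_priv, domain);   /* drop the wakeref */

	return val;
}
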
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c index bcffe8ea642c..e95e0e7a7fa1 100644 --- a/drivers/gpu/drm/meson/meson_venc.c +++ b/drivers/gpu/drm/meson/meson_venc.c | |||
@@ -983,6 +983,13 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic, | |||
983 | unsigned int sof_lines; | 983 | unsigned int sof_lines; |
984 | unsigned int vsync_lines; | 984 | unsigned int vsync_lines; |
985 | 985 | ||
986 | /* Use VENCI for 480i and 576i and double HDMI pixels */ | ||
987 | if (mode->flags & DRM_MODE_FLAG_DBLCLK) { | ||
988 | hdmi_repeat = true; | ||
989 | use_enci = true; | ||
990 | venc_hdmi_latency = 1; | ||
991 | } | ||
992 | |||
986 | if (meson_venc_hdmi_supported_vic(vic)) { | 993 | if (meson_venc_hdmi_supported_vic(vic)) { |
987 | vmode = meson_venc_hdmi_get_vic_vmode(vic); | 994 | vmode = meson_venc_hdmi_get_vic_vmode(vic); |
988 | if (!vmode) { | 995 | if (!vmode) { |
@@ -994,13 +1001,7 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic, | |||
994 | } else { | 1001 | } else { |
995 | meson_venc_hdmi_get_dmt_vmode(mode, &vmode_dmt); | 1002 | meson_venc_hdmi_get_dmt_vmode(mode, &vmode_dmt); |
996 | vmode = &vmode_dmt; | 1003 | vmode = &vmode_dmt; |
997 | } | 1004 | use_enci = false; |
998 | |||
999 | /* Use VENCI for 480i and 576i and double HDMI pixels */ | ||
1000 | if (mode->flags & DRM_MODE_FLAG_DBLCLK) { | ||
1001 | hdmi_repeat = true; | ||
1002 | use_enci = true; | ||
1003 | venc_hdmi_latency = 1; | ||
1004 | } | 1005 | } |
1005 | 1006 | ||
1006 | /* Repeat VENC pixels for 480/576i/p, 720p50/60 and 1080p50/60 */ | 1007 | /* Repeat VENC pixels for 480/576i/p, 720p50/60 and 1080p50/60 */ |
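
The meson_venc change hoists the DRM_MODE_FLAG_DBLCLK handling above the VIC lookup, so doubled-clock 480i/576i modes select VENCI and pixel repetition regardless of which branch handles the mode, while the DMT fallback now explicitly runs with use_enci cleared. The reordered control flow, reduced to a skeleton (a sketch; types and flags as used in meson_venc_hdmi_mode_set()):

static void pick_encoder(const struct drm_display_mode *mode,
			 bool *use_enci, bool *hdmi_repeat)
{
	*use_enci = false;
	*hdmi_repeat = false;

	/*
	 * Decide on ENCI first: doubled-clock 480i/576i modes need pixel
	 * repetition whether or not the VIC has a native vmode table.
	 */
	if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
		*hdmi_repeat = true;
		*use_enci = true;
	}

	/* The VIC/DMT lookup that follows no longer owns this decision. */
}
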
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 04f1dfba12e5..0aaedc554879 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h | |||
@@ -212,8 +212,6 @@ struct mga_device { | |||
212 | int fb_mtrr; | 212 | int fb_mtrr; |
213 | 213 | ||
214 | struct { | 214 | struct { |
215 | struct drm_global_reference mem_global_ref; | ||
216 | struct ttm_bo_global_ref bo_global_ref; | ||
217 | struct ttm_bo_device bdev; | 215 | struct ttm_bo_device bdev; |
218 | } ttm; | 216 | } ttm; |
219 | 217 | ||
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 05570f0de4d7..d96a9b32455e 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c | |||
@@ -36,63 +36,6 @@ mgag200_bdev(struct ttm_bo_device *bd) | |||
36 | return container_of(bd, struct mga_device, ttm.bdev); | 36 | return container_of(bd, struct mga_device, ttm.bdev); |
37 | } | 37 | } |
38 | 38 | ||
39 | static int | ||
40 | mgag200_ttm_mem_global_init(struct drm_global_reference *ref) | ||
41 | { | ||
42 | return ttm_mem_global_init(ref->object); | ||
43 | } | ||
44 | |||
45 | static void | ||
46 | mgag200_ttm_mem_global_release(struct drm_global_reference *ref) | ||
47 | { | ||
48 | ttm_mem_global_release(ref->object); | ||
49 | } | ||
50 | |||
51 | static int mgag200_ttm_global_init(struct mga_device *ast) | ||
52 | { | ||
53 | struct drm_global_reference *global_ref; | ||
54 | int r; | ||
55 | |||
56 | global_ref = &ast->ttm.mem_global_ref; | ||
57 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; | ||
58 | global_ref->size = sizeof(struct ttm_mem_global); | ||
59 | global_ref->init = &mgag200_ttm_mem_global_init; | ||
60 | global_ref->release = &mgag200_ttm_mem_global_release; | ||
61 | r = drm_global_item_ref(global_ref); | ||
62 | if (r != 0) { | ||
63 | DRM_ERROR("Failed setting up TTM memory accounting " | ||
64 | "subsystem.\n"); | ||
65 | return r; | ||
66 | } | ||
67 | |||
68 | ast->ttm.bo_global_ref.mem_glob = | ||
69 | ast->ttm.mem_global_ref.object; | ||
70 | global_ref = &ast->ttm.bo_global_ref.ref; | ||
71 | global_ref->global_type = DRM_GLOBAL_TTM_BO; | ||
72 | global_ref->size = sizeof(struct ttm_bo_global); | ||
73 | global_ref->init = &ttm_bo_global_init; | ||
74 | global_ref->release = &ttm_bo_global_release; | ||
75 | r = drm_global_item_ref(global_ref); | ||
76 | if (r != 0) { | ||
77 | DRM_ERROR("Failed setting up TTM BO subsystem.\n"); | ||
78 | drm_global_item_unref(&ast->ttm.mem_global_ref); | ||
79 | return r; | ||
80 | } | ||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | static void | ||
85 | mgag200_ttm_global_release(struct mga_device *ast) | ||
86 | { | ||
87 | if (ast->ttm.mem_global_ref.release == NULL) | ||
88 | return; | ||
89 | |||
90 | drm_global_item_unref(&ast->ttm.bo_global_ref.ref); | ||
91 | drm_global_item_unref(&ast->ttm.mem_global_ref); | ||
92 | ast->ttm.mem_global_ref.release = NULL; | ||
93 | } | ||
94 | |||
95 | |||
96 | static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo) | 39 | static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo) |
97 | { | 40 | { |
98 | struct mgag200_bo *bo; | 41 | struct mgag200_bo *bo; |
@@ -232,12 +175,7 @@ int mgag200_mm_init(struct mga_device *mdev) | |||
232 | struct drm_device *dev = mdev->dev; | 175 | struct drm_device *dev = mdev->dev; |
233 | struct ttm_bo_device *bdev = &mdev->ttm.bdev; | 176 | struct ttm_bo_device *bdev = &mdev->ttm.bdev; |
234 | 177 | ||
235 | ret = mgag200_ttm_global_init(mdev); | ||
236 | if (ret) | ||
237 | return ret; | ||
238 | |||
239 | ret = ttm_bo_device_init(&mdev->ttm.bdev, | 178 | ret = ttm_bo_device_init(&mdev->ttm.bdev, |
240 | mdev->ttm.bo_global_ref.ref.object, | ||
241 | &mgag200_bo_driver, | 179 | &mgag200_bo_driver, |
242 | dev->anon_inode->i_mapping, | 180 | dev->anon_inode->i_mapping, |
243 | DRM_FILE_PAGE_OFFSET, | 181 | DRM_FILE_PAGE_OFFSET, |
@@ -268,8 +206,6 @@ void mgag200_mm_fini(struct mga_device *mdev) | |||
268 | 206 | ||
269 | ttm_bo_device_release(&mdev->ttm.bdev); | 207 | ttm_bo_device_release(&mdev->ttm.bdev); |
270 | 208 | ||
271 | mgag200_ttm_global_release(mdev); | ||
272 | |||
273 | arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0), | 209 | arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0), |
274 | pci_resource_len(dev->pdev, 0)); | 210 | pci_resource_len(dev->pdev, 0)); |
275 | arch_phys_wc_del(mdev->fb_mtrr); | 211 | arch_phys_wc_del(mdev->fb_mtrr); |
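
For context on the boilerplate deleted here (and in the nouveau, qxl and radeon hunks below): drm_global_item_ref() kept exactly one refcounted instance of the TTM memory and BO globals alive across all drivers, which is why every driver carried the same init/release pair. A simplified refcounted-singleton sketch of that mechanism, not the actual drm_global implementation:

#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_MUTEX(item_mutex);
static void *item_object;
static unsigned int item_refcount;

/* First caller allocates the shared object; later callers take a ref. */
static void *global_item_ref(size_t size, int (*init)(void *obj))
{
	mutex_lock(&item_mutex);
	if (!item_refcount) {
		item_object = kzalloc(size, GFP_KERNEL);
		if (!item_object || init(item_object)) {
			kfree(item_object);
			item_object = NULL;
			mutex_unlock(&item_mutex);
			return NULL;
		}
	}
	item_refcount++;
	mutex_unlock(&item_mutex);
	return item_object;
}

/* Last caller out frees the shared object. */
static void global_item_unref(void)
{
	mutex_lock(&item_mutex);
	if (!--item_refcount) {
		kfree(item_object);
		item_object = NULL;
	}
	mutex_unlock(&item_mutex);
}
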
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 0b2191fa96f7..d20b9ba4b1c1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
@@ -146,8 +146,6 @@ struct nouveau_drm { | |||
146 | 146 | ||
147 | /* TTM interface support */ | 147 | /* TTM interface support */ |
148 | struct { | 148 | struct { |
149 | struct drm_global_reference mem_global_ref; | ||
150 | struct ttm_bo_global_ref bo_global_ref; | ||
151 | struct ttm_bo_device bdev; | 149 | struct ttm_bo_device bdev; |
152 | atomic_t validate_sequence; | 150 | atomic_t validate_sequence; |
153 | int (*move)(struct nouveau_channel *, | 151 | int (*move)(struct nouveau_channel *, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 8edb9f2a4269..1543c2f8d3d3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c | |||
@@ -175,66 +175,6 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) | |||
175 | } | 175 | } |
176 | 176 | ||
177 | static int | 177 | static int |
178 | nouveau_ttm_mem_global_init(struct drm_global_reference *ref) | ||
179 | { | ||
180 | return ttm_mem_global_init(ref->object); | ||
181 | } | ||
182 | |||
183 | static void | ||
184 | nouveau_ttm_mem_global_release(struct drm_global_reference *ref) | ||
185 | { | ||
186 | ttm_mem_global_release(ref->object); | ||
187 | } | ||
188 | |||
189 | int | ||
190 | nouveau_ttm_global_init(struct nouveau_drm *drm) | ||
191 | { | ||
192 | struct drm_global_reference *global_ref; | ||
193 | int ret; | ||
194 | |||
195 | global_ref = &drm->ttm.mem_global_ref; | ||
196 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; | ||
197 | global_ref->size = sizeof(struct ttm_mem_global); | ||
198 | global_ref->init = &nouveau_ttm_mem_global_init; | ||
199 | global_ref->release = &nouveau_ttm_mem_global_release; | ||
200 | |||
201 | ret = drm_global_item_ref(global_ref); | ||
202 | if (unlikely(ret != 0)) { | ||
203 | DRM_ERROR("Failed setting up TTM memory accounting\n"); | ||
204 | drm->ttm.mem_global_ref.release = NULL; | ||
205 | return ret; | ||
206 | } | ||
207 | |||
208 | drm->ttm.bo_global_ref.mem_glob = global_ref->object; | ||
209 | global_ref = &drm->ttm.bo_global_ref.ref; | ||
210 | global_ref->global_type = DRM_GLOBAL_TTM_BO; | ||
211 | global_ref->size = sizeof(struct ttm_bo_global); | ||
212 | global_ref->init = &ttm_bo_global_init; | ||
213 | global_ref->release = &ttm_bo_global_release; | ||
214 | |||
215 | ret = drm_global_item_ref(global_ref); | ||
216 | if (unlikely(ret != 0)) { | ||
217 | DRM_ERROR("Failed setting up TTM BO subsystem\n"); | ||
218 | drm_global_item_unref(&drm->ttm.mem_global_ref); | ||
219 | drm->ttm.mem_global_ref.release = NULL; | ||
220 | return ret; | ||
221 | } | ||
222 | |||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | void | ||
227 | nouveau_ttm_global_release(struct nouveau_drm *drm) | ||
228 | { | ||
229 | if (drm->ttm.mem_global_ref.release == NULL) | ||
230 | return; | ||
231 | |||
232 | drm_global_item_unref(&drm->ttm.bo_global_ref.ref); | ||
233 | drm_global_item_unref(&drm->ttm.mem_global_ref); | ||
234 | drm->ttm.mem_global_ref.release = NULL; | ||
235 | } | ||
236 | |||
237 | static int | ||
238 | nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind) | 178 | nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind) |
239 | { | 179 | { |
240 | struct nvif_mmu *mmu = &drm->client.mmu; | 180 | struct nvif_mmu *mmu = &drm->client.mmu; |
@@ -296,12 +236,7 @@ nouveau_ttm_init(struct nouveau_drm *drm) | |||
296 | drm->agp.cma = pci->agp.cma; | 236 | drm->agp.cma = pci->agp.cma; |
297 | } | 237 | } |
298 | 238 | ||
299 | ret = nouveau_ttm_global_init(drm); | ||
300 | if (ret) | ||
301 | return ret; | ||
302 | |||
303 | ret = ttm_bo_device_init(&drm->ttm.bdev, | 239 | ret = ttm_bo_device_init(&drm->ttm.bdev, |
304 | drm->ttm.bo_global_ref.ref.object, | ||
305 | &nouveau_bo_driver, | 240 | &nouveau_bo_driver, |
306 | dev->anon_inode->i_mapping, | 241 | dev->anon_inode->i_mapping, |
307 | DRM_FILE_PAGE_OFFSET, | 242 | DRM_FILE_PAGE_OFFSET, |
@@ -356,8 +291,6 @@ nouveau_ttm_fini(struct nouveau_drm *drm) | |||
356 | 291 | ||
357 | ttm_bo_device_release(&drm->ttm.bdev); | 292 | ttm_bo_device_release(&drm->ttm.bdev); |
358 | 293 | ||
359 | nouveau_ttm_global_release(drm); | ||
360 | |||
361 | arch_phys_wc_del(drm->ttm.mtrr); | 294 | arch_phys_wc_del(drm->ttm.mtrr); |
362 | drm->ttm.mtrr = 0; | 295 | drm->ttm.mtrr = 0; |
363 | arch_io_free_memtype_wc(device->func->resource_addr(device, 1), | 296 | arch_io_free_memtype_wc(device->func->resource_addr(device, 1), |
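
With the globals managed inside TTM itself, driver setup collapses to a single ttm_bo_device_init() call, as the mgag200 and nouveau hunks show. A sketch of the slimmed-down path using the five-argument signature visible in these hunks; the demo_* names are hypothetical:

/* Sketch; the demo_* names are hypothetical, the TTM call is from the hunk. */
static int demo_ttm_init(struct demo_device *ddev)
{
	int ret;

	ret = ttm_bo_device_init(&ddev->ttm.bdev,
				 &demo_bo_driver,                  /* driver ops */
				 ddev->dev->anon_inode->i_mapping, /* mmap space */
				 DRM_FILE_PAGE_OFFSET,
				 true);                            /* need_dma32 */
	if (ret) {
		DRM_ERROR("failed initialising bo driver (%d)\n", ret);
		return ret;
	}

	/* Memory-type regions (VRAM, GTT, ...) are registered after this. */
	return 0;
}
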
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 394c129cfb3b..0a485c5b982e 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c | |||
@@ -5409,11 +5409,14 @@ static int dsi_probe(struct platform_device *pdev) | |||
5409 | 5409 | ||
5410 | /* DSI on OMAP3 doesn't have register DSI_GNQ, set number | 5410 | /* DSI on OMAP3 doesn't have register DSI_GNQ, set number |
5411 | * of data lanes to 3 by default */ | 5411 | * of data lanes to 3 by default */ |
5412 | if (dsi->data->quirks & DSI_QUIRK_GNQ) | 5412 | if (dsi->data->quirks & DSI_QUIRK_GNQ) { |
5413 | dsi_runtime_get(dsi); | ||
5413 | /* NB_DATA_LANES */ | 5414 | /* NB_DATA_LANES */ |
5414 | dsi->num_lanes_supported = 1 + REG_GET(dsi, DSI_GNQ, 11, 9); | 5415 | dsi->num_lanes_supported = 1 + REG_GET(dsi, DSI_GNQ, 11, 9); |
5415 | else | 5416 | dsi_runtime_put(dsi); |
5417 | } else { | ||
5416 | dsi->num_lanes_supported = 3; | 5418 | dsi->num_lanes_supported = 3; |
5419 | } | ||
5417 | 5420 | ||
5418 | r = dsi_init_output(dsi); | 5421 | r = dsi_init_output(dsi); |
5419 | if (r) | 5422 | if (r) |
@@ -5426,15 +5429,19 @@ static int dsi_probe(struct platform_device *pdev) | |||
5426 | } | 5429 | } |
5427 | 5430 | ||
5428 | r = of_platform_populate(dev->of_node, NULL, NULL, dev); | 5431 | r = of_platform_populate(dev->of_node, NULL, NULL, dev); |
5429 | if (r) | 5432 | if (r) { |
5430 | DSSERR("Failed to populate DSI child devices: %d\n", r); | 5433 | DSSERR("Failed to populate DSI child devices: %d\n", r); |
5434 | goto err_uninit_output; | ||
5435 | } | ||
5431 | 5436 | ||
5432 | r = component_add(&pdev->dev, &dsi_component_ops); | 5437 | r = component_add(&pdev->dev, &dsi_component_ops); |
5433 | if (r) | 5438 | if (r) |
5434 | goto err_uninit_output; | 5439 | goto err_of_depopulate; |
5435 | 5440 | ||
5436 | return 0; | 5441 | return 0; |
5437 | 5442 | ||
5443 | err_of_depopulate: | ||
5444 | of_platform_depopulate(dev); | ||
5438 | err_uninit_output: | 5445 | err_uninit_output: |
5439 | dsi_uninit_output(dsi); | 5446 | dsi_uninit_output(dsi); |
5440 | err_pm_disable: | 5447 | err_pm_disable: |
@@ -5470,19 +5477,12 @@ static int dsi_runtime_suspend(struct device *dev) | |||
5470 | /* wait for current handler to finish before turning the DSI off */ | 5477 | /* wait for current handler to finish before turning the DSI off */ |
5471 | synchronize_irq(dsi->irq); | 5478 | synchronize_irq(dsi->irq); |
5472 | 5479 | ||
5473 | dispc_runtime_put(dsi->dss->dispc); | ||
5474 | |||
5475 | return 0; | 5480 | return 0; |
5476 | } | 5481 | } |
5477 | 5482 | ||
5478 | static int dsi_runtime_resume(struct device *dev) | 5483 | static int dsi_runtime_resume(struct device *dev) |
5479 | { | 5484 | { |
5480 | struct dsi_data *dsi = dev_get_drvdata(dev); | 5485 | struct dsi_data *dsi = dev_get_drvdata(dev); |
5481 | int r; | ||
5482 | |||
5483 | r = dispc_runtime_get(dsi->dss->dispc); | ||
5484 | if (r) | ||
5485 | return r; | ||
5486 | 5486 | ||
5487 | dsi->is_enabled = true; | 5487 | dsi->is_enabled = true; |
5488 | /* ensure the irq handler sees the is_enabled value */ | 5488 | /* ensure the irq handler sees the is_enabled value */ |
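
dsi_probe() reads DSI_GNQ before any consumer holds a runtime-PM reference, so the hunk brackets the register access with dsi_runtime_get()/dsi_runtime_put(). The same shape expressed with the core runtime-PM API, as a sketch:

#include <linux/io.h>
#include <linux/pm_runtime.h>

static int read_hw_revision(struct device *dev, void __iomem *regs, u32 *rev)
{
	int ret;

	/* Power the block up for the duration of the register access. */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);   /* balance the failed get */
		return ret;
	}

	*rev = readl(regs);                   /* clocks are guaranteed on */

	pm_runtime_put(dev);                  /* let the block idle again */
	return 0;
}
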
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 1aaf260aa9b8..7553c7fc1c45 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c | |||
@@ -1484,16 +1484,23 @@ static int dss_probe(struct platform_device *pdev) | |||
1484 | dss); | 1484 | dss); |
1485 | 1485 | ||
1486 | /* Add all the child devices as components. */ | 1486 | /* Add all the child devices as components. */ |
1487 | r = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); | ||
1488 | if (r) | ||
1489 | goto err_uninit_debugfs; | ||
1490 | |||
1487 | omapdss_gather_components(&pdev->dev); | 1491 | omapdss_gather_components(&pdev->dev); |
1488 | 1492 | ||
1489 | device_for_each_child(&pdev->dev, &match, dss_add_child_component); | 1493 | device_for_each_child(&pdev->dev, &match, dss_add_child_component); |
1490 | 1494 | ||
1491 | r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match); | 1495 | r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match); |
1492 | if (r) | 1496 | if (r) |
1493 | goto err_uninit_debugfs; | 1497 | goto err_of_depopulate; |
1494 | 1498 | ||
1495 | return 0; | 1499 | return 0; |
1496 | 1500 | ||
1501 | err_of_depopulate: | ||
1502 | of_platform_depopulate(&pdev->dev); | ||
1503 | |||
1497 | err_uninit_debugfs: | 1504 | err_uninit_debugfs: |
1498 | dss_debugfs_remove_file(dss->debugfs.clk); | 1505 | dss_debugfs_remove_file(dss->debugfs.clk); |
1499 | dss_debugfs_remove_file(dss->debugfs.dss); | 1506 | dss_debugfs_remove_file(dss->debugfs.dss); |
@@ -1522,6 +1529,8 @@ static int dss_remove(struct platform_device *pdev) | |||
1522 | { | 1529 | { |
1523 | struct dss_device *dss = platform_get_drvdata(pdev); | 1530 | struct dss_device *dss = platform_get_drvdata(pdev); |
1524 | 1531 | ||
1532 | of_platform_depopulate(&pdev->dev); | ||
1533 | |||
1525 | component_master_del(&pdev->dev, &dss_component_ops); | 1534 | component_master_del(&pdev->dev, &dss_component_ops); |
1526 | 1535 | ||
1527 | dss_debugfs_remove_file(dss->debugfs.clk); | 1536 | dss_debugfs_remove_file(dss->debugfs.clk); |
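
Both dsi.c and dss.c now treat of_platform_populate() as a managed step: its return value is checked, later failures unwind through of_platform_depopulate(), and remove() depopulates before tearing down the component master, mirroring probe in reverse. A sketch of the balanced shape with a hypothetical driver (component_match assembly omitted):

#include <linux/component.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static const struct component_master_ops demo_ops;     /* stand-in */

static int demo_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	int r;

	/* ... component_match_add() calls omitted ... */

	r = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
	if (r)
		return r;

	r = component_master_add_with_match(&pdev->dev, &demo_ops, match);
	if (r)
		goto err_of_depopulate;        /* undo populate on failure */

	return 0;

err_of_depopulate:
	of_platform_depopulate(&pdev->dev);
	return r;
}

static int demo_remove(struct platform_device *pdev)
{
	of_platform_depopulate(&pdev->dev);    /* mirror probe, in reverse */
	component_master_del(&pdev->dev, &demo_ops);
	return 0;
}
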
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index cf6230eac31a..aabdda394c9c 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c | |||
@@ -635,10 +635,14 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data) | |||
635 | 635 | ||
636 | hdmi->dss = dss; | 636 | hdmi->dss = dss; |
637 | 637 | ||
638 | r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp); | 638 | r = hdmi_runtime_get(hdmi); |
639 | if (r) | 639 | if (r) |
640 | return r; | 640 | return r; |
641 | 641 | ||
642 | r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp); | ||
643 | if (r) | ||
644 | goto err_runtime_put; | ||
645 | |||
642 | r = hdmi4_cec_init(hdmi->pdev, &hdmi->core, &hdmi->wp); | 646 | r = hdmi4_cec_init(hdmi->pdev, &hdmi->core, &hdmi->wp); |
643 | if (r) | 647 | if (r) |
644 | goto err_pll_uninit; | 648 | goto err_pll_uninit; |
@@ -652,12 +656,16 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data) | |||
652 | hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs, | 656 | hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs, |
653 | hdmi); | 657 | hdmi); |
654 | 658 | ||
659 | hdmi_runtime_put(hdmi); | ||
660 | |||
655 | return 0; | 661 | return 0; |
656 | 662 | ||
657 | err_cec_uninit: | 663 | err_cec_uninit: |
658 | hdmi4_cec_uninit(&hdmi->core); | 664 | hdmi4_cec_uninit(&hdmi->core); |
659 | err_pll_uninit: | 665 | err_pll_uninit: |
660 | hdmi_pll_uninit(&hdmi->pll); | 666 | hdmi_pll_uninit(&hdmi->pll); |
667 | err_runtime_put: | ||
668 | hdmi_runtime_put(hdmi); | ||
661 | return r; | 669 | return r; |
662 | } | 670 | } |
663 | 671 | ||
@@ -833,32 +841,6 @@ static int hdmi4_remove(struct platform_device *pdev) | |||
833 | return 0; | 841 | return 0; |
834 | } | 842 | } |
835 | 843 | ||
836 | static int hdmi_runtime_suspend(struct device *dev) | ||
837 | { | ||
838 | struct omap_hdmi *hdmi = dev_get_drvdata(dev); | ||
839 | |||
840 | dispc_runtime_put(hdmi->dss->dispc); | ||
841 | |||
842 | return 0; | ||
843 | } | ||
844 | |||
845 | static int hdmi_runtime_resume(struct device *dev) | ||
846 | { | ||
847 | struct omap_hdmi *hdmi = dev_get_drvdata(dev); | ||
848 | int r; | ||
849 | |||
850 | r = dispc_runtime_get(hdmi->dss->dispc); | ||
851 | if (r < 0) | ||
852 | return r; | ||
853 | |||
854 | return 0; | ||
855 | } | ||
856 | |||
857 | static const struct dev_pm_ops hdmi_pm_ops = { | ||
858 | .runtime_suspend = hdmi_runtime_suspend, | ||
859 | .runtime_resume = hdmi_runtime_resume, | ||
860 | }; | ||
861 | |||
862 | static const struct of_device_id hdmi_of_match[] = { | 844 | static const struct of_device_id hdmi_of_match[] = { |
863 | { .compatible = "ti,omap4-hdmi", }, | 845 | { .compatible = "ti,omap4-hdmi", }, |
864 | {}, | 846 | {}, |
@@ -869,7 +851,6 @@ struct platform_driver omapdss_hdmi4hw_driver = { | |||
869 | .remove = hdmi4_remove, | 851 | .remove = hdmi4_remove, |
870 | .driver = { | 852 | .driver = { |
871 | .name = "omapdss_hdmi", | 853 | .name = "omapdss_hdmi", |
872 | .pm = &hdmi_pm_ops, | ||
873 | .of_match_table = hdmi_of_match, | 854 | .of_match_table = hdmi_of_match, |
874 | .suppress_bind_attrs = true, | 855 | .suppress_bind_attrs = true, |
875 | }, | 856 | }, |
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index b0e4a7463f8c..9e8556f67a29 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c | |||
@@ -825,32 +825,6 @@ static int hdmi5_remove(struct platform_device *pdev) | |||
825 | return 0; | 825 | return 0; |
826 | } | 826 | } |
827 | 827 | ||
828 | static int hdmi_runtime_suspend(struct device *dev) | ||
829 | { | ||
830 | struct omap_hdmi *hdmi = dev_get_drvdata(dev); | ||
831 | |||
832 | dispc_runtime_put(hdmi->dss->dispc); | ||
833 | |||
834 | return 0; | ||
835 | } | ||
836 | |||
837 | static int hdmi_runtime_resume(struct device *dev) | ||
838 | { | ||
839 | struct omap_hdmi *hdmi = dev_get_drvdata(dev); | ||
840 | int r; | ||
841 | |||
842 | r = dispc_runtime_get(hdmi->dss->dispc); | ||
843 | if (r < 0) | ||
844 | return r; | ||
845 | |||
846 | return 0; | ||
847 | } | ||
848 | |||
849 | static const struct dev_pm_ops hdmi_pm_ops = { | ||
850 | .runtime_suspend = hdmi_runtime_suspend, | ||
851 | .runtime_resume = hdmi_runtime_resume, | ||
852 | }; | ||
853 | |||
854 | static const struct of_device_id hdmi_of_match[] = { | 828 | static const struct of_device_id hdmi_of_match[] = { |
855 | { .compatible = "ti,omap5-hdmi", }, | 829 | { .compatible = "ti,omap5-hdmi", }, |
856 | { .compatible = "ti,dra7-hdmi", }, | 830 | { .compatible = "ti,dra7-hdmi", }, |
@@ -862,7 +836,6 @@ struct platform_driver omapdss_hdmi5hw_driver = { | |||
862 | .remove = hdmi5_remove, | 836 | .remove = hdmi5_remove, |
863 | .driver = { | 837 | .driver = { |
864 | .name = "omapdss_hdmi5", | 838 | .name = "omapdss_hdmi5", |
865 | .pm = &hdmi_pm_ops, | ||
866 | .of_match_table = hdmi_of_match, | 839 | .of_match_table = hdmi_of_match, |
867 | .suppress_bind_attrs = true, | 840 | .suppress_bind_attrs = true, |
868 | }, | 841 | }, |
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index ff0b18c8e4ac..b5f52727f8b1 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c | |||
@@ -946,19 +946,12 @@ static int venc_runtime_suspend(struct device *dev) | |||
946 | if (venc->tv_dac_clk) | 946 | if (venc->tv_dac_clk) |
947 | clk_disable_unprepare(venc->tv_dac_clk); | 947 | clk_disable_unprepare(venc->tv_dac_clk); |
948 | 948 | ||
949 | dispc_runtime_put(venc->dss->dispc); | ||
950 | |||
951 | return 0; | 949 | return 0; |
952 | } | 950 | } |
953 | 951 | ||
954 | static int venc_runtime_resume(struct device *dev) | 952 | static int venc_runtime_resume(struct device *dev) |
955 | { | 953 | { |
956 | struct venc_device *venc = dev_get_drvdata(dev); | 954 | struct venc_device *venc = dev_get_drvdata(dev); |
957 | int r; | ||
958 | |||
959 | r = dispc_runtime_get(venc->dss->dispc); | ||
960 | if (r < 0) | ||
961 | return r; | ||
962 | 955 | ||
963 | if (venc->tv_dac_clk) | 956 | if (venc->tv_dac_clk) |
964 | clk_prepare_enable(venc->tv_dac_clk); | 957 | clk_prepare_enable(venc->tv_dac_clk); |
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 62928ec0e7db..caffc547ef97 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c | |||
@@ -350,11 +350,14 @@ static void omap_crtc_arm_event(struct drm_crtc *crtc) | |||
350 | static void omap_crtc_atomic_enable(struct drm_crtc *crtc, | 350 | static void omap_crtc_atomic_enable(struct drm_crtc *crtc, |
351 | struct drm_crtc_state *old_state) | 351 | struct drm_crtc_state *old_state) |
352 | { | 352 | { |
353 | struct omap_drm_private *priv = crtc->dev->dev_private; | ||
353 | struct omap_crtc *omap_crtc = to_omap_crtc(crtc); | 354 | struct omap_crtc *omap_crtc = to_omap_crtc(crtc); |
354 | int ret; | 355 | int ret; |
355 | 356 | ||
356 | DBG("%s", omap_crtc->name); | 357 | DBG("%s", omap_crtc->name); |
357 | 358 | ||
359 | priv->dispc_ops->runtime_get(priv->dispc); | ||
360 | |||
358 | spin_lock_irq(&crtc->dev->event_lock); | 361 | spin_lock_irq(&crtc->dev->event_lock); |
359 | drm_crtc_vblank_on(crtc); | 362 | drm_crtc_vblank_on(crtc); |
360 | ret = drm_crtc_vblank_get(crtc); | 363 | ret = drm_crtc_vblank_get(crtc); |
@@ -367,6 +370,7 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc, | |||
367 | static void omap_crtc_atomic_disable(struct drm_crtc *crtc, | 370 | static void omap_crtc_atomic_disable(struct drm_crtc *crtc, |
368 | struct drm_crtc_state *old_state) | 371 | struct drm_crtc_state *old_state) |
369 | { | 372 | { |
373 | struct omap_drm_private *priv = crtc->dev->dev_private; | ||
370 | struct omap_crtc *omap_crtc = to_omap_crtc(crtc); | 374 | struct omap_crtc *omap_crtc = to_omap_crtc(crtc); |
371 | 375 | ||
372 | DBG("%s", omap_crtc->name); | 376 | DBG("%s", omap_crtc->name); |
@@ -379,6 +383,8 @@ static void omap_crtc_atomic_disable(struct drm_crtc *crtc, | |||
379 | spin_unlock_irq(&crtc->dev->event_lock); | 383 | spin_unlock_irq(&crtc->dev->event_lock); |
380 | 384 | ||
381 | drm_crtc_vblank_off(crtc); | 385 | drm_crtc_vblank_off(crtc); |
386 | |||
387 | priv->dispc_ops->runtime_put(priv->dispc); | ||
382 | } | 388 | } |
383 | 389 | ||
384 | static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc, | 390 | static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc, |
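
The dispc_runtime_get()/put() calls removed from the dsi, hdmi4, hdmi5 and venc runtime-PM hooks move here: omap_crtc.c now holds the DISPC reference exactly while the CRTC is enabled. Condensed from the two hunks above (a sketch; vblank event plumbing omitted):

static void demo_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_state)
{
	struct omap_drm_private *priv = crtc->dev->dev_private;

	/* DISPC must be powered for as long as this CRTC scans out. */
	priv->dispc_ops->runtime_get(priv->dispc);

	drm_crtc_vblank_on(crtc);
}

static void demo_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_state)
{
	struct omap_drm_private *priv = crtc->dev->dev_private;

	drm_crtc_vblank_off(crtc);

	priv->dispc_ops->runtime_put(priv->dispc);   /* balances enable */
}
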
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 14d3fa855708..13a0254b59a1 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h | |||
@@ -126,9 +126,6 @@ struct qxl_output { | |||
126 | #define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc) | 126 | #define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc) |
127 | 127 | ||
128 | struct qxl_mman { | 128 | struct qxl_mman { |
129 | struct ttm_bo_global_ref bo_global_ref; | ||
130 | struct drm_global_reference mem_global_ref; | ||
131 | unsigned int mem_global_referenced:1; | ||
132 | struct ttm_bo_device bdev; | 129 | struct ttm_bo_device bdev; |
133 | }; | 130 | }; |
134 | 131 | ||
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 559a10113837..886f61e94f24 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c | |||
@@ -46,62 +46,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev) | |||
46 | return qdev; | 46 | return qdev; |
47 | } | 47 | } |
48 | 48 | ||
49 | static int qxl_ttm_mem_global_init(struct drm_global_reference *ref) | ||
50 | { | ||
51 | return ttm_mem_global_init(ref->object); | ||
52 | } | ||
53 | |||
54 | static void qxl_ttm_mem_global_release(struct drm_global_reference *ref) | ||
55 | { | ||
56 | ttm_mem_global_release(ref->object); | ||
57 | } | ||
58 | |||
59 | static int qxl_ttm_global_init(struct qxl_device *qdev) | ||
60 | { | ||
61 | struct drm_global_reference *global_ref; | ||
62 | int r; | ||
63 | |||
64 | qdev->mman.mem_global_referenced = false; | ||
65 | global_ref = &qdev->mman.mem_global_ref; | ||
66 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; | ||
67 | global_ref->size = sizeof(struct ttm_mem_global); | ||
68 | global_ref->init = &qxl_ttm_mem_global_init; | ||
69 | global_ref->release = &qxl_ttm_mem_global_release; | ||
70 | |||
71 | r = drm_global_item_ref(global_ref); | ||
72 | if (r != 0) { | ||
73 | DRM_ERROR("Failed setting up TTM memory accounting " | ||
74 | "subsystem.\n"); | ||
75 | return r; | ||
76 | } | ||
77 | |||
78 | qdev->mman.bo_global_ref.mem_glob = | ||
79 | qdev->mman.mem_global_ref.object; | ||
80 | global_ref = &qdev->mman.bo_global_ref.ref; | ||
81 | global_ref->global_type = DRM_GLOBAL_TTM_BO; | ||
82 | global_ref->size = sizeof(struct ttm_bo_global); | ||
83 | global_ref->init = &ttm_bo_global_init; | ||
84 | global_ref->release = &ttm_bo_global_release; | ||
85 | r = drm_global_item_ref(global_ref); | ||
86 | if (r != 0) { | ||
87 | DRM_ERROR("Failed setting up TTM BO subsystem.\n"); | ||
88 | drm_global_item_unref(&qdev->mman.mem_global_ref); | ||
89 | return r; | ||
90 | } | ||
91 | |||
92 | qdev->mman.mem_global_referenced = true; | ||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | static void qxl_ttm_global_fini(struct qxl_device *qdev) | ||
97 | { | ||
98 | if (qdev->mman.mem_global_referenced) { | ||
99 | drm_global_item_unref(&qdev->mman.bo_global_ref.ref); | ||
100 | drm_global_item_unref(&qdev->mman.mem_global_ref); | ||
101 | qdev->mman.mem_global_referenced = false; | ||
102 | } | ||
103 | } | ||
104 | |||
105 | static struct vm_operations_struct qxl_ttm_vm_ops; | 49 | static struct vm_operations_struct qxl_ttm_vm_ops; |
106 | static const struct vm_operations_struct *ttm_vm_ops; | 50 | static const struct vm_operations_struct *ttm_vm_ops; |
107 | 51 | ||
@@ -372,12 +316,8 @@ int qxl_ttm_init(struct qxl_device *qdev) | |||
372 | int r; | 316 | int r; |
373 | int num_io_pages; /* != rom->num_io_pages, we include surface0 */ | 317 | int num_io_pages; /* != rom->num_io_pages, we include surface0 */ |
374 | 318 | ||
375 | r = qxl_ttm_global_init(qdev); | ||
376 | if (r) | ||
377 | return r; | ||
378 | /* No other user of the address space, so set it to 0 */ | 319 | /* No other user of the address space, so set it to 0 */ |
379 | r = ttm_bo_device_init(&qdev->mman.bdev, | 320 | r = ttm_bo_device_init(&qdev->mman.bdev, |
380 | qdev->mman.bo_global_ref.ref.object, | ||
381 | &qxl_bo_driver, | 321 | &qxl_bo_driver, |
382 | qdev->ddev.anon_inode->i_mapping, | 322 | qdev->ddev.anon_inode->i_mapping, |
383 | DRM_FILE_PAGE_OFFSET, 0); | 323 | DRM_FILE_PAGE_OFFSET, 0); |
@@ -413,7 +353,6 @@ void qxl_ttm_fini(struct qxl_device *qdev) | |||
413 | ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM); | 353 | ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM); |
414 | ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV); | 354 | ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV); |
415 | ttm_bo_device_release(&qdev->mman.bdev); | 355 | ttm_bo_device_release(&qdev->mman.bdev); |
416 | qxl_ttm_global_fini(qdev); | ||
417 | DRM_INFO("qxl: ttm finalized\n"); | 356 | DRM_INFO("qxl: ttm finalized\n"); |
418 | } | 357 | } |
419 | 358 | ||
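
A side note on the one-line accessors these TTM files keep (mgag200_bdev(), qxl_get_qdev(), and radeon_get_rdev() below): because the ttm_bo_device is embedded in the driver struct rather than pointed to, container_of() recovers the enclosing device from the bdev pointer TTM passes back into driver callbacks. A standalone sketch of the mechanism with stand-in types:

#include <stddef.h>
#include <stdio.h>

struct bo_device { int placeholder; };      /* stand-in for ttm_bo_device */

struct demo_device {
	int fb_mtrr;
	struct bo_device bdev;              /* embedded, not a pointer */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct demo_device *demo_bdev(struct bo_device *bd)
{
	/* Walk back from the embedded member to its enclosing struct. */
	return container_of(bd, struct demo_device, bdev);
}

int main(void)
{
	struct demo_device dev = { .fb_mtrr = 42 };

	printf("%d\n", demo_bdev(&dev.bdev)->fb_mtrr);   /* prints 42 */
	return 0;
}
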
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 21161aa8acbf..652126fd6dd4 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -814,7 +814,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
814 | ((idx_value >> 21) & 0xF)); | 814 | ((idx_value >> 21) & 0xF)); |
815 | return -EINVAL; | 815 | return -EINVAL; |
816 | } | 816 | } |
817 | /* Pass through. */ | 817 | /* Fall through. */ |
818 | case 6: | 818 | case 6: |
819 | track->cb[i].cpp = 4; | 819 | track->cb[i].cpp = 4; |
820 | break; | 820 | break; |
@@ -965,7 +965,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
965 | return -EINVAL; | 965 | return -EINVAL; |
966 | } | 966 | } |
967 | /* The same rules apply as for DXT3/5. */ | 967 | /* The same rules apply as for DXT3/5. */ |
968 | /* Pass through. */ | 968 | /* Fall through. */ |
969 | case R300_TX_FORMAT_DXT3: | 969 | case R300_TX_FORMAT_DXT3: |
970 | case R300_TX_FORMAT_DXT5: | 970 | case R300_TX_FORMAT_DXT5: |
971 | track->textures[i].cpp = 1; | 971 | track->textures[i].cpp = 1; |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 45e1d4e60759..2318d9e3ed96 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
@@ -109,6 +109,7 @@ void r420_pipes_init(struct radeon_device *rdev) | |||
109 | default: | 109 | default: |
110 | /* force to 1 pipe */ | 110 | /* force to 1 pipe */ |
111 | num_pipes = 1; | 111 | num_pipes = 1; |
112 | /* fall through */ | ||
112 | case 1: | 113 | case 1: |
113 | tmp = (0 << 1); | 114 | tmp = (0 << 1); |
114 | break; | 115 | break; |
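
The r300.c and r420.c hunks normalize switch comments to the wording GCC's -Wimplicit-fallthrough actually recognizes: "fall through", not "pass through", placed immediately before the next case label. A compilable sketch mirroring the r300 structure (the names are stand-ins):

/* Compile with: gcc -Wimplicit-fallthrough -c fallthrough.c */
enum fmt { FMT_DXT1, FMT_DXT3, FMT_DXT5, FMT_RGBA8 };

static int extra_dxt1_checks(void) { return 0; }   /* stand-in validation */

int bytes_per_texel(enum fmt f)
{
	int cpp;

	switch (f) {
	case FMT_DXT1:
		/* The same packing rules apply as for DXT3/5. */
		if (extra_dxt1_checks())
			return -1;
		/* fall through */
	case FMT_DXT3:
	case FMT_DXT5:
		cpp = 1;
		break;
	default:
		cpp = 4;
		break;
	}

	return cpp;
}
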
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 1a6f6edb3515..32808e50be12 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -448,10 +448,7 @@ struct radeon_surface_reg { | |||
448 | * TTM. | 448 | * TTM. |
449 | */ | 449 | */ |
450 | struct radeon_mman { | 450 | struct radeon_mman { |
451 | struct ttm_bo_global_ref bo_global_ref; | ||
452 | struct drm_global_reference mem_global_ref; | ||
453 | struct ttm_bo_device bdev; | 451 | struct ttm_bo_device bdev; |
454 | bool mem_global_referenced; | ||
455 | bool initialized; | 452 | bool initialized; |
456 | 453 | ||
457 | #if defined(CONFIG_DEBUG_FS) | 454 | #if defined(CONFIG_DEBUG_FS) |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c index 4278272e3191..3dae2c4dec71 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c | |||
@@ -421,24 +421,14 @@ static void radeon_legacy_write_tv_restarts(struct radeon_encoder *radeon_encode | |||
421 | 421 | ||
422 | static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder) | 422 | static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder) |
423 | { | 423 | { |
424 | struct drm_device *dev = encoder->dev; | ||
425 | struct radeon_device *rdev = dev->dev_private; | ||
426 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 424 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
427 | struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; | 425 | struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; |
428 | struct radeon_crtc *radeon_crtc; | ||
429 | int restart; | 426 | int restart; |
430 | unsigned int h_total, v_total, f_total; | 427 | unsigned int h_total, v_total, f_total; |
431 | int v_offset, h_offset; | 428 | int v_offset, h_offset; |
432 | u16 p1, p2, h_inc; | 429 | u16 p1, p2, h_inc; |
433 | bool h_changed; | 430 | bool h_changed; |
434 | const struct radeon_tv_mode_constants *const_ptr; | 431 | const struct radeon_tv_mode_constants *const_ptr; |
435 | struct radeon_pll *pll; | ||
436 | |||
437 | radeon_crtc = to_radeon_crtc(radeon_encoder->base.crtc); | ||
438 | if (radeon_crtc->crtc_id == 1) | ||
439 | pll = &rdev->clock.p2pll; | ||
440 | else | ||
441 | pll = &rdev->clock.p1pll; | ||
442 | 432 | ||
443 | const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL); | 433 | const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL); |
444 | if (!const_ptr) | 434 | if (!const_ptr) |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 92f6d4002eea..833e909706a9 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -314,11 +314,9 @@ struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo) | |||
314 | void radeon_bo_unref(struct radeon_bo **bo) | 314 | void radeon_bo_unref(struct radeon_bo **bo) |
315 | { | 315 | { |
316 | struct ttm_buffer_object *tbo; | 316 | struct ttm_buffer_object *tbo; |
317 | struct radeon_device *rdev; | ||
318 | 317 | ||
319 | if ((*bo) == NULL) | 318 | if ((*bo) == NULL) |
320 | return; | 319 | return; |
321 | rdev = (*bo)->rdev; | ||
322 | tbo = &((*bo)->tbo); | 320 | tbo = &((*bo)->tbo); |
323 | ttm_bo_put(tbo); | 321 | ttm_bo_put(tbo); |
324 | *bo = NULL; | 322 | *bo = NULL; |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index cbb67e9ffb3a..9920a6fc11bf 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -60,65 +60,6 @@ static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) | |||
60 | return rdev; | 60 | return rdev; |
61 | } | 61 | } |
62 | 62 | ||
63 | |||
64 | /* | ||
65 | * Global memory. | ||
66 | */ | ||
67 | static int radeon_ttm_mem_global_init(struct drm_global_reference *ref) | ||
68 | { | ||
69 | return ttm_mem_global_init(ref->object); | ||
70 | } | ||
71 | |||
72 | static void radeon_ttm_mem_global_release(struct drm_global_reference *ref) | ||
73 | { | ||
74 | ttm_mem_global_release(ref->object); | ||
75 | } | ||
76 | |||
77 | static int radeon_ttm_global_init(struct radeon_device *rdev) | ||
78 | { | ||
79 | struct drm_global_reference *global_ref; | ||
80 | int r; | ||
81 | |||
82 | rdev->mman.mem_global_referenced = false; | ||
83 | global_ref = &rdev->mman.mem_global_ref; | ||
84 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; | ||
85 | global_ref->size = sizeof(struct ttm_mem_global); | ||
86 | global_ref->init = &radeon_ttm_mem_global_init; | ||
87 | global_ref->release = &radeon_ttm_mem_global_release; | ||
88 | r = drm_global_item_ref(global_ref); | ||
89 | if (r != 0) { | ||
90 | DRM_ERROR("Failed setting up TTM memory accounting " | ||
91 | "subsystem.\n"); | ||
92 | return r; | ||
93 | } | ||
94 | |||
95 | rdev->mman.bo_global_ref.mem_glob = | ||
96 | rdev->mman.mem_global_ref.object; | ||
97 | global_ref = &rdev->mman.bo_global_ref.ref; | ||
98 | global_ref->global_type = DRM_GLOBAL_TTM_BO; | ||
99 | global_ref->size = sizeof(struct ttm_bo_global); | ||
100 | global_ref->init = &ttm_bo_global_init; | ||
101 | global_ref->release = &ttm_bo_global_release; | ||
102 | r = drm_global_item_ref(global_ref); | ||
103 | if (r != 0) { | ||
104 | DRM_ERROR("Failed setting up TTM BO subsystem.\n"); | ||
105 | drm_global_item_unref(&rdev->mman.mem_global_ref); | ||
106 | return r; | ||
107 | } | ||
108 | |||
109 | rdev->mman.mem_global_referenced = true; | ||
110 | return 0; | ||
111 | } | ||
112 | |||
113 | static void radeon_ttm_global_fini(struct radeon_device *rdev) | ||
114 | { | ||
115 | if (rdev->mman.mem_global_referenced) { | ||
116 | drm_global_item_unref(&rdev->mman.bo_global_ref.ref); | ||
117 | drm_global_item_unref(&rdev->mman.mem_global_ref); | ||
118 | rdev->mman.mem_global_referenced = false; | ||
119 | } | ||
120 | } | ||
121 | |||
122 | static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) | 63 | static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) |
123 | { | 64 | { |
124 | return 0; | 65 | return 0; |
@@ -847,13 +788,8 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
847 | { | 788 | { |
848 | int r; | 789 | int r; |
849 | 790 | ||
850 | r = radeon_ttm_global_init(rdev); | ||
851 | if (r) { | ||
852 | return r; | ||
853 | } | ||
854 | /* No other user of the address space, so set it to 0 */ | 791 | /* No other user of the address space, so set it to 0 */ |
855 | r = ttm_bo_device_init(&rdev->mman.bdev, | 792 | r = ttm_bo_device_init(&rdev->mman.bdev, |
856 | rdev->mman.bo_global_ref.ref.object, | ||
857 | &radeon_bo_driver, | 793 | &radeon_bo_driver, |
858 | rdev->ddev->anon_inode->i_mapping, | 794 | rdev->ddev->anon_inode->i_mapping, |
859 | DRM_FILE_PAGE_OFFSET, | 795 | DRM_FILE_PAGE_OFFSET, |
@@ -925,7 +861,6 @@ void radeon_ttm_fini(struct radeon_device *rdev) | |||
925 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT); | 861 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT); |
926 | ttm_bo_device_release(&rdev->mman.bdev); | 862 | ttm_bo_device_release(&rdev->mman.bdev); |
927 | radeon_gart_fini(rdev); | 863 | radeon_gart_fini(rdev); |
928 | radeon_ttm_global_fini(rdev); | ||
929 | rdev->mman.initialized = false; | 864 | rdev->mman.initialized = false; |
930 | DRM_INFO("radeon: ttm finalized\n"); | 865 | DRM_INFO("radeon: ttm finalized\n"); |
931 | } | 866 | } |
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 17741843cf51..90dacab67be5 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c | |||
@@ -226,9 +226,6 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) | |||
226 | * system clock, and have no internal clock divider. | 226 | * system clock, and have no internal clock divider. |
227 | */ | 227 | */ |
228 | 228 | ||
229 | if (WARN_ON(!rcrtc->extclock)) | ||
230 | return; | ||
231 | |||
232 | /* | 229 | /* |
233 | * The H3 ES1.x exhibits dot clock duty cycle stability issues. | 230 | * The H3 ES1.x exhibits dot clock duty cycle stability issues. |
234 | * We can work around them by configuring the DPLL to twice the | 231 | * We can work around them by configuring the DPLL to twice the |
@@ -701,7 +698,7 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc, | |||
701 | * CRTC will be put later in .atomic_disable(). | 698 | * CRTC will be put later in .atomic_disable(). |
702 | * | 699 | * |
703 | * If a mode set is not in progress the CRTC is enabled, and the | 700 | * If a mode set is not in progress the CRTC is enabled, and the |
704 | * following get call will be a no-op. There is thus no need to belance | 701 | * following get call will be a no-op. There is thus no need to balance |
705 | * it in .atomic_flush() either. | 702 | * it in .atomic_flush() either. |
706 | */ | 703 | */ |
707 | rcar_du_crtc_get(rcrtc); | 704 | rcar_du_crtc_get(rcrtc); |
@@ -738,10 +735,22 @@ enum drm_mode_status rcar_du_crtc_mode_valid(struct drm_crtc *crtc, | |||
738 | struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); | 735 | struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); |
739 | struct rcar_du_device *rcdu = rcrtc->group->dev; | 736 | struct rcar_du_device *rcdu = rcrtc->group->dev; |
740 | bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; | 737 | bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; |
738 | unsigned int vbp; | ||
741 | 739 | ||
742 | if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED)) | 740 | if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED)) |
743 | return MODE_NO_INTERLACE; | 741 | return MODE_NO_INTERLACE; |
744 | 742 | ||
743 | /* | ||
744 | * The hardware requires a minimum combined horizontal sync and back | ||
745 | * porch of 20 pixels and a minimum vertical back porch of 3 lines. | ||
746 | */ | ||
747 | if (mode->htotal - mode->hsync_start < 20) | ||
748 | return MODE_HBLANK_NARROW; | ||
749 | |||
750 | vbp = (mode->vtotal - mode->vsync_end) / (interlaced ? 2 : 1); | ||
751 | if (vbp < 3) | ||
752 | return MODE_VBLANK_NARROW; | ||
753 | |||
745 | return MODE_OK; | 754 | return MODE_OK; |
746 | } | 755 | } |
747 | 756 | ||
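
Why mode->htotal - mode->hsync_start is the combined horizontal sync plus back porch: a drm_display_mode orders hdisplay <= hsync_start <= hsync_end <= htotal, so the sync width is hsync_end - hsync_start, the back porch is htotal - hsync_end, and their sum telescopes to htotal - hsync_start. A numeric check with the standard CEA 1920x1080 timing:

/* CEA-861 1920x1080p timing, as reported in a struct drm_display_mode. */
int hdisplay    = 1920;
int hsync_start = 2008;   /* front porch ends, sync begins */
int hsync_end   = 2052;   /* sync ends, back porch begins  */
int htotal      = 2200;   /* back porch ends, next line    */

int hsync = hsync_end - hsync_start;   /*  44 px */
int hbp   = htotal - hsync_end;        /* 148 px */

/* hsync + hbp == 192 == htotal - hsync_start: well above the 20 px limit. */
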
@@ -1002,7 +1011,7 @@ unlock: | |||
1002 | drm_modeset_drop_locks(&ctx); | 1011 | drm_modeset_drop_locks(&ctx); |
1003 | drm_modeset_acquire_fini(&ctx); | 1012 | drm_modeset_acquire_fini(&ctx); |
1004 | 1013 | ||
1005 | return 0; | 1014 | return ret; |
1006 | } | 1015 | } |
1007 | 1016 | ||
1008 | static const struct drm_crtc_funcs crtc_funcs_gen2 = { | 1017 | static const struct drm_crtc_funcs crtc_funcs_gen2 = { |
@@ -1113,9 +1122,16 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex, | |||
1113 | clk = devm_clk_get(rcdu->dev, clk_name); | 1122 | clk = devm_clk_get(rcdu->dev, clk_name); |
1114 | if (!IS_ERR(clk)) { | 1123 | if (!IS_ERR(clk)) { |
1115 | rcrtc->extclock = clk; | 1124 | rcrtc->extclock = clk; |
1116 | } else if (PTR_ERR(rcrtc->clock) == -EPROBE_DEFER) { | 1125 | } else if (PTR_ERR(clk) == -EPROBE_DEFER) { |
1117 | dev_info(rcdu->dev, "can't get external clock %u\n", hwindex); | ||
1118 | return -EPROBE_DEFER; | 1126 | return -EPROBE_DEFER; |
1127 | } else if (rcdu->info->dpll_mask & BIT(hwindex)) { | ||
1128 | /* | ||
1129 | * DU channels that have a display PLL can't use the internal | ||
1130 | * system clock and thus require an external clock. | ||
1131 | */ | ||
1132 | ret = PTR_ERR(clk); | ||
1133 | dev_err(rcdu->dev, "can't get dclkin.%u: %d\n", hwindex, ret); | ||
1134 | return ret; | ||
1119 | } | 1135 | } |
1120 | 1136 | ||
1121 | init_waitqueue_head(&rcrtc->flip_wait); | 1137 | init_waitqueue_head(&rcrtc->flip_wait); |
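
Note on the new mode_valid checks above: the DU needs at least 20 pixels of combined horizontal sync and back porch, and at least 3 lines of vertical back porch per field. A minimal sketch (mode_check() is a hypothetical standalone helper, not part of the patch) showing which struct drm_display_mode fields feed the two rejection paths:

#include <drm/drm_modes.h>

static enum drm_mode_status mode_check(const struct drm_display_mode *mode)
{
	bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
	/* sync width + back porch = htotal - hsync_start */
	unsigned int hsbp = mode->htotal - mode->hsync_start;
	/* vertical timings are frame-based, so halve them per field */
	unsigned int vbp = (mode->vtotal - mode->vsync_end) /
			   (interlaced ? 2 : 1);

	if (hsbp < 20)
		return MODE_HBLANK_NARROW;
	if (vbp < 3)
		return MODE_VBLANK_NARROW;

	return MODE_OK;
}
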
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index c6770043dcdc..94f055186b95 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c | |||
@@ -41,7 +41,7 @@ static const struct rcar_du_device_info rzg1_du_r8a7743_info = { | |||
41 | .channels_mask = BIT(1) | BIT(0), | 41 | .channels_mask = BIT(1) | BIT(0), |
42 | .routes = { | 42 | .routes = { |
43 | /* | 43 | /* |
44 | * R8A7743 has one RGB output and one LVDS output | 44 | * R8A774[34] has one RGB output and one LVDS output |
45 | */ | 45 | */ |
46 | [RCAR_DU_OUTPUT_DPAD0] = { | 46 | [RCAR_DU_OUTPUT_DPAD0] = { |
47 | .possible_crtcs = BIT(1) | BIT(0), | 47 | .possible_crtcs = BIT(1) | BIT(0), |
@@ -77,6 +77,33 @@ static const struct rcar_du_device_info rzg1_du_r8a7745_info = { | |||
77 | }, | 77 | }, |
78 | }; | 78 | }; |
79 | 79 | ||
80 | static const struct rcar_du_device_info rzg1_du_r8a77470_info = { | ||
81 | .gen = 2, | ||
82 | .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | ||
83 | | RCAR_DU_FEATURE_EXT_CTRL_REGS | ||
84 | | RCAR_DU_FEATURE_INTERLACED | ||
85 | | RCAR_DU_FEATURE_TVM_SYNC, | ||
86 | .channels_mask = BIT(1) | BIT(0), | ||
87 | .routes = { | ||
88 | /* | ||
89 | * R8A77470 has two RGB outputs, one LVDS output, and | ||
90 | * one (currently unsupported) analog video output | ||
91 | */ | ||
92 | [RCAR_DU_OUTPUT_DPAD0] = { | ||
93 | .possible_crtcs = BIT(0), | ||
94 | .port = 0, | ||
95 | }, | ||
96 | [RCAR_DU_OUTPUT_DPAD1] = { | ||
97 | .possible_crtcs = BIT(1), | ||
98 | .port = 1, | ||
99 | }, | ||
100 | [RCAR_DU_OUTPUT_LVDS0] = { | ||
101 | .possible_crtcs = BIT(0) | BIT(1), | ||
102 | .port = 2, | ||
103 | }, | ||
104 | }, | ||
105 | }; | ||
106 | |||
80 | static const struct rcar_du_device_info rcar_du_r8a7779_info = { | 107 | static const struct rcar_du_device_info rcar_du_r8a7779_info = { |
81 | .gen = 2, | 108 | .gen = 2, |
82 | .features = RCAR_DU_FEATURE_INTERLACED | 109 | .features = RCAR_DU_FEATURE_INTERLACED |
@@ -341,7 +368,9 @@ static const struct rcar_du_device_info rcar_du_r8a7799x_info = { | |||
341 | 368 | ||
342 | static const struct of_device_id rcar_du_of_table[] = { | 369 | static const struct of_device_id rcar_du_of_table[] = { |
343 | { .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info }, | 370 | { .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info }, |
371 | { .compatible = "renesas,du-r8a7744", .data = &rzg1_du_r8a7743_info }, | ||
344 | { .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info }, | 372 | { .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info }, |
373 | { .compatible = "renesas,du-r8a77470", .data = &rzg1_du_r8a77470_info }, | ||
345 | { .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info }, | 374 | { .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info }, |
346 | { .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info }, | 375 | { .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info }, |
347 | { .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info }, | 376 | { .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info }, |
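
The r8a7744 entry reuses rzg1_du_r8a7743_info, so both RZ/G1 parts probe with identical routing, while r8a77470 gets its own info structure for its second RGB output. A hedged sketch of how the matched .data pointer is typically recovered at probe time (mydrv_probe() is hypothetical; the retrieval helper is the standard OF one):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static int mydrv_probe(struct platform_device *pdev)
{
	/* Returns the .data field of the matching rcar_du_of_table entry. */
	const struct rcar_du_device_info *info =
		of_device_get_match_data(&pdev->dev);

	if (!info)
		return -ENODEV;

	/* ... use info->gen, info->channels_mask, info->routes ... */
	return 0;
}
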
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 4ebd61ecbee1..fe6f65c94eef 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c | |||
@@ -582,7 +582,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) | |||
582 | * Initialize vertical blanking interrupts handling. Start with vblank | 582 | * Initialize vertical blanking interrupts handling. Start with vblank |
583 | * disabled for all CRTCs. | 583 | * disabled for all CRTCs. |
584 | */ | 584 | */ |
585 | ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1); | 585 | ret = drm_vblank_init(dev, rcdu->num_crtcs); |
586 | if (ret < 0) | 586 | if (ret < 0) |
587 | return ret; | 587 | return ret; |
588 | 588 | ||
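
The drm_vblank_init() fix above is a count-versus-mask confusion: the second argument is the number of CRTCs, and the old bitmask expression over-allocates as soon as there are two or more of them. Worked out:

/*
 * rcdu->num_crtcs == 2:
 *   (1 << 2) - 1 == 3   -> three vblank structures allocated, one too many
 *              2        -> what drm_vblank_init() actually expects
 * The two expressions only agree for num_crtcs <= 1.
 */
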
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index 9e07758a755c..39d5ae3fdf72 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c | |||
@@ -783,13 +783,14 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp) | |||
783 | drm_plane_helper_add(&plane->plane, | 783 | drm_plane_helper_add(&plane->plane, |
784 | &rcar_du_plane_helper_funcs); | 784 | &rcar_du_plane_helper_funcs); |
785 | 785 | ||
786 | drm_plane_create_alpha_property(&plane->plane); | ||
787 | |||
786 | if (type == DRM_PLANE_TYPE_PRIMARY) | 788 | if (type == DRM_PLANE_TYPE_PRIMARY) |
787 | continue; | 789 | continue; |
788 | 790 | ||
789 | drm_object_attach_property(&plane->plane.base, | 791 | drm_object_attach_property(&plane->plane.base, |
790 | rcdu->props.colorkey, | 792 | rcdu->props.colorkey, |
791 | RCAR_DU_COLORKEY_NONE); | 793 | RCAR_DU_COLORKEY_NONE); |
792 | drm_plane_create_alpha_property(&plane->plane); | ||
793 | drm_plane_create_zpos_property(&plane->plane, 1, 1, 7); | 794 | drm_plane_create_zpos_property(&plane->plane, 1, 1, 7); |
794 | } | 795 | } |
795 | 796 | ||
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c index 173d7ad0b991..534a128a869d 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c | |||
@@ -790,6 +790,7 @@ static const struct of_device_id rcar_lvds_of_table[] = { | |||
790 | { .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info }, | 790 | { .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info }, |
791 | { .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info }, | 791 | { .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info }, |
792 | { .compatible = "renesas,r8a7796-lvds", .data = &rcar_lvds_gen3_info }, | 792 | { .compatible = "renesas,r8a7796-lvds", .data = &rcar_lvds_gen3_info }, |
793 | { .compatible = "renesas,r8a77965-lvds", .data = &rcar_lvds_gen3_info }, | ||
793 | { .compatible = "renesas,r8a77970-lvds", .data = &rcar_lvds_r8a77970_info }, | 794 | { .compatible = "renesas,r8a77970-lvds", .data = &rcar_lvds_r8a77970_info }, |
794 | { .compatible = "renesas,r8a77980-lvds", .data = &rcar_lvds_gen3_info }, | 795 | { .compatible = "renesas,r8a77980-lvds", .data = &rcar_lvds_gen3_info }, |
795 | { .compatible = "renesas,r8a77990-lvds", .data = &rcar_lvds_r8a77990_info }, | 796 | { .compatible = "renesas,r8a77990-lvds", .data = &rcar_lvds_r8a77990_info }, |
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 3e22a54a99c2..4463d3826ecb 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c | |||
@@ -130,7 +130,14 @@ drm_sched_entity_get_free_sched(struct drm_sched_entity *entity) | |||
130 | int i; | 130 | int i; |
131 | 131 | ||
132 | for (i = 0; i < entity->num_rq_list; ++i) { | 132 | for (i = 0; i < entity->num_rq_list; ++i) { |
133 | num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs); | 133 | struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched; |
134 | |||
135 | if (!entity->rq_list[i]->sched->ready) { | ||
136 | DRM_WARN("sched%s is not ready, skipping", sched->name); | ||
137 | continue; | ||
138 | } | ||
139 | |||
140 | num_jobs = atomic_read(&sched->num_jobs); | ||
134 | if (num_jobs < min_jobs) { | 141 | if (num_jobs < min_jobs) { |
135 | min_jobs = num_jobs; | 142 | min_jobs = num_jobs; |
136 | rq = entity->rq_list[i]; | 143 | rq = entity->rq_list[i]; |
@@ -204,7 +211,6 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, | |||
204 | 211 | ||
205 | drm_sched_fence_finished(job->s_fence); | 212 | drm_sched_fence_finished(job->s_fence); |
206 | WARN_ON(job->s_fence->parent); | 213 | WARN_ON(job->s_fence->parent); |
207 | dma_fence_put(&job->s_fence->finished); | ||
208 | job->sched->ops->free_job(job); | 214 | job->sched->ops->free_job(job); |
209 | } | 215 | } |
210 | 216 | ||
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 44fe587aaef9..18ebbb05762e 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c | |||
@@ -196,6 +196,19 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched) | |||
196 | schedule_delayed_work(&sched->work_tdr, sched->timeout); | 196 | schedule_delayed_work(&sched->work_tdr, sched->timeout); |
197 | } | 197 | } |
198 | 198 | ||
199 | /** | ||
200 | * drm_sched_fault - immediately start timeout handler | ||
201 | * | ||
202 | * @sched: scheduler where the timeout handling should be started. | ||
203 | * | ||
204 | * Start timeout handling immediately when the driver detects a hardware fault. | ||
205 | */ | ||
206 | void drm_sched_fault(struct drm_gpu_scheduler *sched) | ||
207 | { | ||
208 | mod_delayed_work(system_wq, &sched->work_tdr, 0); | ||
209 | } | ||
210 | EXPORT_SYMBOL(drm_sched_fault); | ||
211 | |||
199 | /* job_finish is called after hw fence signaled | 212 | /* job_finish is called after hw fence signaled |
200 | */ | 213 | */ |
201 | static void drm_sched_job_finish(struct work_struct *work) | 214 | static void drm_sched_job_finish(struct work_struct *work) |
@@ -220,7 +233,6 @@ static void drm_sched_job_finish(struct work_struct *work) | |||
220 | drm_sched_start_timeout(sched); | 233 | drm_sched_start_timeout(sched); |
221 | spin_unlock(&sched->job_list_lock); | 234 | spin_unlock(&sched->job_list_lock); |
222 | 235 | ||
223 | dma_fence_put(&s_job->s_fence->finished); | ||
224 | sched->ops->free_job(s_job); | 236 | sched->ops->free_job(s_job); |
225 | } | 237 | } |
226 | 238 | ||
@@ -283,6 +295,7 @@ static void drm_sched_job_timedout(struct work_struct *work) | |||
283 | already_signaled: | 295 | already_signaled: |
284 | ; | 296 | ; |
285 | } | 297 | } |
298 | drm_sched_start_timeout(sched); | ||
286 | spin_unlock(&sched->job_list_lock); | 299 | spin_unlock(&sched->job_list_lock); |
287 | } | 300 | } |
288 | 301 | ||
@@ -406,6 +419,9 @@ int drm_sched_job_init(struct drm_sched_job *job, | |||
406 | struct drm_gpu_scheduler *sched; | 419 | struct drm_gpu_scheduler *sched; |
407 | 420 | ||
408 | drm_sched_entity_select_rq(entity); | 421 | drm_sched_entity_select_rq(entity); |
422 | if (!entity->rq) | ||
423 | return -ENOENT; | ||
424 | |||
409 | sched = entity->rq->sched; | 425 | sched = entity->rq->sched; |
410 | 426 | ||
411 | job->sched = sched; | 427 | job->sched = sched; |
@@ -424,6 +440,18 @@ int drm_sched_job_init(struct drm_sched_job *job, | |||
424 | EXPORT_SYMBOL(drm_sched_job_init); | 440 | EXPORT_SYMBOL(drm_sched_job_init); |
425 | 441 | ||
426 | /** | 442 | /** |
443 | * drm_sched_job_cleanup - clean up scheduler job resources | ||
444 | * | ||
445 | * @job: scheduler job to clean up | ||
446 | */ | ||
447 | void drm_sched_job_cleanup(struct drm_sched_job *job) | ||
448 | { | ||
449 | dma_fence_put(&job->s_fence->finished); | ||
450 | job->s_fence = NULL; | ||
451 | } | ||
452 | EXPORT_SYMBOL(drm_sched_job_cleanup); | ||
453 | |||
454 | /** | ||
427 | * drm_sched_ready - is the scheduler ready | 455 | * drm_sched_ready - is the scheduler ready |
428 | * | 456 | * |
429 | * @sched: scheduler instance | 457 | * @sched: scheduler instance |
@@ -619,6 +647,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, | |||
619 | return PTR_ERR(sched->thread); | 647 | return PTR_ERR(sched->thread); |
620 | } | 648 | } |
621 | 649 | ||
650 | sched->ready = true; | ||
622 | return 0; | 651 | return 0; |
623 | } | 652 | } |
624 | EXPORT_SYMBOL(drm_sched_init); | 653 | EXPORT_SYMBOL(drm_sched_init); |
@@ -634,5 +663,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched) | |||
634 | { | 663 | { |
635 | if (sched->thread) | 664 | if (sched->thread) |
636 | kthread_stop(sched->thread); | 665 | kthread_stop(sched->thread); |
666 | |||
667 | sched->ready = false; | ||
637 | } | 668 | } |
638 | EXPORT_SYMBOL(drm_sched_fini); | 669 | EXPORT_SYMBOL(drm_sched_fini); |
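
The two new exports define the driver-facing contract: .free_job callbacks drop the finished fence through drm_sched_job_cleanup() (where the dma_fence_put() calls removed above moved to), and a hardware fault interrupt can fast-forward timeout handling with drm_sched_fault() instead of waiting for the timer. A hedged sketch, with all mydrv_* names hypothetical:

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <drm/gpu_scheduler.h>

static void mydrv_free_job(struct drm_sched_job *sched_job)
{
	struct mydrv_job *job =
		container_of(sched_job, struct mydrv_job, base);

	drm_sched_job_cleanup(sched_job);	/* puts s_fence->finished */
	kfree(job);
}

static irqreturn_t mydrv_fault_irq(int irq, void *data)
{
	struct mydrv_device *mdev = data;

	/* Start TDR immediately rather than waiting for the timeout. */
	drm_sched_fault(&mdev->sched);
	return IRQ_HANDLED;
}

Submission paths also gain one failure mode: drm_sched_job_init() now returns -ENOENT when the entity ended up without a ready runqueue, so callers must check for it.
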
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 83b4657ffb10..d87935bf8e30 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -45,6 +45,14 @@ | |||
45 | 45 | ||
46 | static void ttm_bo_global_kobj_release(struct kobject *kobj); | 46 | static void ttm_bo_global_kobj_release(struct kobject *kobj); |
47 | 47 | ||
48 | /** | ||
49 | * ttm_global_mutex - protecting the global BO state | ||
50 | */ | ||
51 | DEFINE_MUTEX(ttm_global_mutex); | ||
52 | struct ttm_bo_global ttm_bo_glob = { | ||
53 | .use_count = 0 | ||
54 | }; | ||
55 | |||
48 | static struct attribute ttm_bo_count = { | 56 | static struct attribute ttm_bo_count = { |
49 | .name = "bo_count", | 57 | .name = "bo_count", |
50 | .mode = S_IRUGO | 58 | .mode = S_IRUGO |
@@ -1519,35 +1527,45 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj) | |||
1519 | container_of(kobj, struct ttm_bo_global, kobj); | 1527 | container_of(kobj, struct ttm_bo_global, kobj); |
1520 | 1528 | ||
1521 | __free_page(glob->dummy_read_page); | 1529 | __free_page(glob->dummy_read_page); |
1522 | kfree(glob); | ||
1523 | } | 1530 | } |
1524 | 1531 | ||
1525 | void ttm_bo_global_release(struct drm_global_reference *ref) | 1532 | static void ttm_bo_global_release(void) |
1526 | { | 1533 | { |
1527 | struct ttm_bo_global *glob = ref->object; | 1534 | struct ttm_bo_global *glob = &ttm_bo_glob; |
1535 | |||
1536 | mutex_lock(&ttm_global_mutex); | ||
1537 | if (--glob->use_count > 0) | ||
1538 | goto out; | ||
1528 | 1539 | ||
1529 | kobject_del(&glob->kobj); | 1540 | kobject_del(&glob->kobj); |
1530 | kobject_put(&glob->kobj); | 1541 | kobject_put(&glob->kobj); |
1542 | ttm_mem_global_release(&ttm_mem_glob); | ||
1543 | out: | ||
1544 | mutex_unlock(&ttm_global_mutex); | ||
1531 | } | 1545 | } |
1532 | EXPORT_SYMBOL(ttm_bo_global_release); | ||
1533 | 1546 | ||
1534 | int ttm_bo_global_init(struct drm_global_reference *ref) | 1547 | static int ttm_bo_global_init(void) |
1535 | { | 1548 | { |
1536 | struct ttm_bo_global_ref *bo_ref = | 1549 | struct ttm_bo_global *glob = &ttm_bo_glob; |
1537 | container_of(ref, struct ttm_bo_global_ref, ref); | 1550 | int ret = 0; |
1538 | struct ttm_bo_global *glob = ref->object; | ||
1539 | int ret; | ||
1540 | unsigned i; | 1551 | unsigned i; |
1541 | 1552 | ||
1542 | mutex_init(&glob->device_list_mutex); | 1553 | mutex_lock(&ttm_global_mutex); |
1554 | if (++glob->use_count > 1) | ||
1555 | goto out; | ||
1556 | |||
1557 | ret = ttm_mem_global_init(&ttm_mem_glob); | ||
1558 | if (ret) | ||
1559 | goto out; | ||
1560 | |||
1543 | spin_lock_init(&glob->lru_lock); | 1561 | spin_lock_init(&glob->lru_lock); |
1544 | glob->mem_glob = bo_ref->mem_glob; | 1562 | glob->mem_glob = &ttm_mem_glob; |
1545 | glob->mem_glob->bo_glob = glob; | 1563 | glob->mem_glob->bo_glob = glob; |
1546 | glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); | 1564 | glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); |
1547 | 1565 | ||
1548 | if (unlikely(glob->dummy_read_page == NULL)) { | 1566 | if (unlikely(glob->dummy_read_page == NULL)) { |
1549 | ret = -ENOMEM; | 1567 | ret = -ENOMEM; |
1550 | goto out_no_drp; | 1568 | goto out; |
1551 | } | 1569 | } |
1552 | 1570 | ||
1553 | for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) | 1571 | for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) |
@@ -1559,13 +1577,10 @@ int ttm_bo_global_init(struct drm_global_reference *ref) | |||
1559 | &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects"); | 1577 | &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects"); |
1560 | if (unlikely(ret != 0)) | 1578 | if (unlikely(ret != 0)) |
1561 | kobject_put(&glob->kobj); | 1579 | kobject_put(&glob->kobj); |
1562 | return ret; | 1580 | out: |
1563 | out_no_drp: | 1581 | mutex_unlock(&ttm_global_mutex); |
1564 | kfree(glob); | ||
1565 | return ret; | 1582 | return ret; |
1566 | } | 1583 | } |
1567 | EXPORT_SYMBOL(ttm_bo_global_init); | ||
1568 | |||
1569 | 1584 | ||
1570 | int ttm_bo_device_release(struct ttm_bo_device *bdev) | 1585 | int ttm_bo_device_release(struct ttm_bo_device *bdev) |
1571 | { | 1586 | { |
@@ -1587,9 +1602,9 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) | |||
1587 | } | 1602 | } |
1588 | } | 1603 | } |
1589 | 1604 | ||
1590 | mutex_lock(&glob->device_list_mutex); | 1605 | mutex_lock(&ttm_global_mutex); |
1591 | list_del(&bdev->device_list); | 1606 | list_del(&bdev->device_list); |
1592 | mutex_unlock(&glob->device_list_mutex); | 1607 | mutex_unlock(&ttm_global_mutex); |
1593 | 1608 | ||
1594 | cancel_delayed_work_sync(&bdev->wq); | 1609 | cancel_delayed_work_sync(&bdev->wq); |
1595 | 1610 | ||
@@ -1604,18 +1619,25 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) | |||
1604 | 1619 | ||
1605 | drm_vma_offset_manager_destroy(&bdev->vma_manager); | 1620 | drm_vma_offset_manager_destroy(&bdev->vma_manager); |
1606 | 1621 | ||
1622 | if (!ret) | ||
1623 | ttm_bo_global_release(); | ||
1624 | |||
1607 | return ret; | 1625 | return ret; |
1608 | } | 1626 | } |
1609 | EXPORT_SYMBOL(ttm_bo_device_release); | 1627 | EXPORT_SYMBOL(ttm_bo_device_release); |
1610 | 1628 | ||
1611 | int ttm_bo_device_init(struct ttm_bo_device *bdev, | 1629 | int ttm_bo_device_init(struct ttm_bo_device *bdev, |
1612 | struct ttm_bo_global *glob, | ||
1613 | struct ttm_bo_driver *driver, | 1630 | struct ttm_bo_driver *driver, |
1614 | struct address_space *mapping, | 1631 | struct address_space *mapping, |
1615 | uint64_t file_page_offset, | 1632 | uint64_t file_page_offset, |
1616 | bool need_dma32) | 1633 | bool need_dma32) |
1617 | { | 1634 | { |
1618 | int ret = -EINVAL; | 1635 | struct ttm_bo_global *glob = &ttm_bo_glob; |
1636 | int ret; | ||
1637 | |||
1638 | ret = ttm_bo_global_init(); | ||
1639 | if (ret) | ||
1640 | return ret; | ||
1619 | 1641 | ||
1620 | bdev->driver = driver; | 1642 | bdev->driver = driver; |
1621 | 1643 | ||
@@ -1636,12 +1658,13 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, | |||
1636 | bdev->dev_mapping = mapping; | 1658 | bdev->dev_mapping = mapping; |
1637 | bdev->glob = glob; | 1659 | bdev->glob = glob; |
1638 | bdev->need_dma32 = need_dma32; | 1660 | bdev->need_dma32 = need_dma32; |
1639 | mutex_lock(&glob->device_list_mutex); | 1661 | mutex_lock(&ttm_global_mutex); |
1640 | list_add_tail(&bdev->device_list, &glob->device_list); | 1662 | list_add_tail(&bdev->device_list, &glob->device_list); |
1641 | mutex_unlock(&glob->device_list_mutex); | 1663 | mutex_unlock(&ttm_global_mutex); |
1642 | 1664 | ||
1643 | return 0; | 1665 | return 0; |
1644 | out_no_sys: | 1666 | out_no_sys: |
1667 | ttm_bo_global_release(); | ||
1645 | return ret; | 1668 | return ret; |
1646 | } | 1669 | } |
1647 | EXPORT_SYMBOL(ttm_bo_device_init); | 1670 | EXPORT_SYMBOL(ttm_bo_device_init); |
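
With the BO global state folded into the refcounted ttm_bo_glob singleton, the glob argument disappears from ttm_bo_device_init(): the first device to initialize brings up ttm_mem_glob and ttm_bo_glob under ttm_global_mutex, and the last ttm_bo_device_release() drops them. A hedged sketch of the new call, mirroring the virtio conversion further down (mydrv_* names hypothetical):

/* No global-reference boilerplate left in the driver: */
ret = ttm_bo_device_init(&mdev->bdev,
			 &mydrv_bo_driver,
			 ddev->anon_inode->i_mapping,
			 MYDRV_FILE_PAGE_OFFSET,
			 false /* need_dma32 */);
if (ret)
	return ret;	/* covers both global and per-device init failure */
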
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index e493edb0d3e7..efa005a1c1b7 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c | |||
@@ -187,14 +187,12 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, | |||
187 | struct ttm_buffer_object *bo; | 187 | struct ttm_buffer_object *bo; |
188 | struct ttm_bo_global *glob; | 188 | struct ttm_bo_global *glob; |
189 | struct ttm_bo_device *bdev; | 189 | struct ttm_bo_device *bdev; |
190 | struct ttm_bo_driver *driver; | ||
191 | 190 | ||
192 | if (list_empty(list)) | 191 | if (list_empty(list)) |
193 | return; | 192 | return; |
194 | 193 | ||
195 | bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo; | 194 | bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo; |
196 | bdev = bo->bdev; | 195 | bdev = bo->bdev; |
197 | driver = bdev->driver; | ||
198 | glob = bo->bdev->glob; | 196 | glob = bo->bdev->glob; |
199 | 197 | ||
200 | spin_lock(&glob->lru_lock); | 198 | spin_lock(&glob->lru_lock); |
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index 450387c92b63..f1567c353b54 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c | |||
@@ -41,6 +41,9 @@ | |||
41 | 41 | ||
42 | #define TTM_MEMORY_ALLOC_RETRIES 4 | 42 | #define TTM_MEMORY_ALLOC_RETRIES 4 |
43 | 43 | ||
44 | struct ttm_mem_global ttm_mem_glob; | ||
45 | EXPORT_SYMBOL(ttm_mem_glob); | ||
46 | |||
44 | struct ttm_mem_zone { | 47 | struct ttm_mem_zone { |
45 | struct kobject kobj; | 48 | struct kobject kobj; |
46 | struct ttm_mem_global *glob; | 49 | struct ttm_mem_global *glob; |
@@ -216,14 +219,6 @@ static ssize_t ttm_mem_global_store(struct kobject *kobj, | |||
216 | return size; | 219 | return size; |
217 | } | 220 | } |
218 | 221 | ||
219 | static void ttm_mem_global_kobj_release(struct kobject *kobj) | ||
220 | { | ||
221 | struct ttm_mem_global *glob = | ||
222 | container_of(kobj, struct ttm_mem_global, kobj); | ||
223 | |||
224 | kfree(glob); | ||
225 | } | ||
226 | |||
227 | static struct attribute *ttm_mem_global_attrs[] = { | 222 | static struct attribute *ttm_mem_global_attrs[] = { |
228 | &ttm_mem_global_lower_mem_limit, | 223 | &ttm_mem_global_lower_mem_limit, |
229 | NULL | 224 | NULL |
@@ -235,7 +230,6 @@ static const struct sysfs_ops ttm_mem_global_ops = { | |||
235 | }; | 230 | }; |
236 | 231 | ||
237 | static struct kobj_type ttm_mem_glob_kobj_type = { | 232 | static struct kobj_type ttm_mem_glob_kobj_type = { |
238 | .release = &ttm_mem_global_kobj_release, | ||
239 | .sysfs_ops = &ttm_mem_global_ops, | 233 | .sysfs_ops = &ttm_mem_global_ops, |
240 | .default_attrs = ttm_mem_global_attrs, | 234 | .default_attrs = ttm_mem_global_attrs, |
241 | }; | 235 | }; |
@@ -464,7 +458,6 @@ out_no_zone: | |||
464 | ttm_mem_global_release(glob); | 458 | ttm_mem_global_release(glob); |
465 | return ret; | 459 | return ret; |
466 | } | 460 | } |
467 | EXPORT_SYMBOL(ttm_mem_global_init); | ||
468 | 461 | ||
469 | void ttm_mem_global_release(struct ttm_mem_global *glob) | 462 | void ttm_mem_global_release(struct ttm_mem_global *glob) |
470 | { | 463 | { |
@@ -486,7 +479,6 @@ void ttm_mem_global_release(struct ttm_mem_global *glob) | |||
486 | kobject_del(&glob->kobj); | 479 | kobject_del(&glob->kobj); |
487 | kobject_put(&glob->kobj); | 480 | kobject_put(&glob->kobj); |
488 | } | 481 | } |
489 | EXPORT_SYMBOL(ttm_mem_global_release); | ||
490 | 482 | ||
491 | static void ttm_check_swapping(struct ttm_mem_global *glob) | 483 | static void ttm_check_swapping(struct ttm_mem_global *glob) |
492 | { | 484 | { |
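
ttm_mem_global_init()/release() stop being exported because only ttm_bo_global_init()/release() call them now; drivers that still need the accounting object reference the ttm_mem_glob singleton directly, as the vmwgfx hunks below do. A hedged sketch of accounting against it (signatures as of this kernel era; treat them as an assumption):

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_memory.h>

struct ttm_operation_ctx ctx = {
	.interruptible = false,
	.no_wait_gpu = false,
};
int ret;

/* Charge one page against the global accounting, then release it. */
ret = ttm_mem_global_alloc(&ttm_mem_glob, PAGE_SIZE, &ctx);
if (!ret)
	ttm_mem_global_free(&ttm_mem_glob, PAGE_SIZE);
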
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index e1f2aab0717b..c66d0ce21435 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c | |||
@@ -35,6 +35,8 @@ v3d_job_free(struct drm_sched_job *sched_job) | |||
35 | { | 35 | { |
36 | struct v3d_job *job = to_v3d_job(sched_job); | 36 | struct v3d_job *job = to_v3d_job(sched_job); |
37 | 37 | ||
38 | drm_sched_job_cleanup(sched_job); | ||
39 | |||
38 | v3d_exec_put(job->exec); | 40 | v3d_exec_put(job->exec); |
39 | } | 41 | } |
40 | 42 | ||
@@ -167,9 +169,6 @@ v3d_job_timedout(struct drm_sched_job *sched_job) | |||
167 | if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) { | 169 | if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) { |
168 | job->timedout_ctca = ctca; | 170 | job->timedout_ctca = ctca; |
169 | job->timedout_ctra = ctra; | 171 | job->timedout_ctra = ctra; |
170 | |||
171 | schedule_delayed_work(&job->base.sched->work_tdr, | ||
172 | job->base.sched->timeout); | ||
173 | return; | 172 | return; |
174 | } | 173 | } |
175 | 174 | ||
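
v3d illustrates both scheduler API changes: .free_job gains drm_sched_job_cleanup(), and .timedout_job drops its manual schedule_delayed_work() because drm_sched_job_timedout() now re-arms the timer itself (see the sched_main.c hunk above). The resulting timedout contract, as a hedged sketch with hypothetical mydrv_* names:

static void mydrv_job_timedout(struct drm_sched_job *sched_job)
{
	struct mydrv_job *job =
		container_of(sched_job, struct mydrv_job, base);
	u32 pc = mydrv_read_pc(job);	/* hypothetical progress probe */

	if (pc != job->last_pc) {
		/* Forward progress: return and let the core re-arm TDR. */
		job->last_pc = pc;
		return;
	}

	mydrv_gpu_reset(job->dev);	/* hypothetical recovery path */
}
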
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 7bec6e36886b..f7e877857c1f 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h | |||
@@ -145,9 +145,6 @@ struct virtio_gpu_fbdev { | |||
145 | }; | 145 | }; |
146 | 146 | ||
147 | struct virtio_gpu_mman { | 147 | struct virtio_gpu_mman { |
148 | struct ttm_bo_global_ref bo_global_ref; | ||
149 | struct drm_global_reference mem_global_ref; | ||
150 | bool mem_global_referenced; | ||
151 | struct ttm_bo_device bdev; | 148 | struct ttm_bo_device bdev; |
152 | }; | 149 | }; |
153 | 150 | ||
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c index cd63dffa6d40..4bfbf25fabff 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ttm.c +++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c | |||
@@ -50,62 +50,6 @@ virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev) | |||
50 | return vgdev; | 50 | return vgdev; |
51 | } | 51 | } |
52 | 52 | ||
53 | static int virtio_gpu_ttm_mem_global_init(struct drm_global_reference *ref) | ||
54 | { | ||
55 | return ttm_mem_global_init(ref->object); | ||
56 | } | ||
57 | |||
58 | static void virtio_gpu_ttm_mem_global_release(struct drm_global_reference *ref) | ||
59 | { | ||
60 | ttm_mem_global_release(ref->object); | ||
61 | } | ||
62 | |||
63 | static int virtio_gpu_ttm_global_init(struct virtio_gpu_device *vgdev) | ||
64 | { | ||
65 | struct drm_global_reference *global_ref; | ||
66 | int r; | ||
67 | |||
68 | vgdev->mman.mem_global_referenced = false; | ||
69 | global_ref = &vgdev->mman.mem_global_ref; | ||
70 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; | ||
71 | global_ref->size = sizeof(struct ttm_mem_global); | ||
72 | global_ref->init = &virtio_gpu_ttm_mem_global_init; | ||
73 | global_ref->release = &virtio_gpu_ttm_mem_global_release; | ||
74 | |||
75 | r = drm_global_item_ref(global_ref); | ||
76 | if (r != 0) { | ||
77 | DRM_ERROR("Failed setting up TTM memory accounting " | ||
78 | "subsystem.\n"); | ||
79 | return r; | ||
80 | } | ||
81 | |||
82 | vgdev->mman.bo_global_ref.mem_glob = | ||
83 | vgdev->mman.mem_global_ref.object; | ||
84 | global_ref = &vgdev->mman.bo_global_ref.ref; | ||
85 | global_ref->global_type = DRM_GLOBAL_TTM_BO; | ||
86 | global_ref->size = sizeof(struct ttm_bo_global); | ||
87 | global_ref->init = &ttm_bo_global_init; | ||
88 | global_ref->release = &ttm_bo_global_release; | ||
89 | r = drm_global_item_ref(global_ref); | ||
90 | if (r != 0) { | ||
91 | DRM_ERROR("Failed setting up TTM BO subsystem.\n"); | ||
92 | drm_global_item_unref(&vgdev->mman.mem_global_ref); | ||
93 | return r; | ||
94 | } | ||
95 | |||
96 | vgdev->mman.mem_global_referenced = true; | ||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev) | ||
101 | { | ||
102 | if (vgdev->mman.mem_global_referenced) { | ||
103 | drm_global_item_unref(&vgdev->mman.bo_global_ref.ref); | ||
104 | drm_global_item_unref(&vgdev->mman.mem_global_ref); | ||
105 | vgdev->mman.mem_global_referenced = false; | ||
106 | } | ||
107 | } | ||
108 | |||
109 | int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma) | 53 | int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma) |
110 | { | 54 | { |
111 | struct drm_file *file_priv; | 55 | struct drm_file *file_priv; |
@@ -382,12 +326,8 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev) | |||
382 | { | 326 | { |
383 | int r; | 327 | int r; |
384 | 328 | ||
385 | r = virtio_gpu_ttm_global_init(vgdev); | ||
386 | if (r) | ||
387 | return r; | ||
388 | /* No others user of address space so set it to 0 */ | 329 | /* No others user of address space so set it to 0 */ |
389 | r = ttm_bo_device_init(&vgdev->mman.bdev, | 330 | r = ttm_bo_device_init(&vgdev->mman.bdev, |
390 | vgdev->mman.bo_global_ref.ref.object, | ||
391 | &virtio_gpu_bo_driver, | 331 | &virtio_gpu_bo_driver, |
392 | vgdev->ddev->anon_inode->i_mapping, | 332 | vgdev->ddev->anon_inode->i_mapping, |
393 | DRM_FILE_PAGE_OFFSET, 0); | 333 | DRM_FILE_PAGE_OFFSET, 0); |
@@ -406,13 +346,11 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev) | |||
406 | err_mm_init: | 346 | err_mm_init: |
407 | ttm_bo_device_release(&vgdev->mman.bdev); | 347 | ttm_bo_device_release(&vgdev->mman.bdev); |
408 | err_dev_init: | 348 | err_dev_init: |
409 | virtio_gpu_ttm_global_fini(vgdev); | ||
410 | return r; | 349 | return r; |
411 | } | 350 | } |
412 | 351 | ||
413 | void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev) | 352 | void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev) |
414 | { | 353 | { |
415 | ttm_bo_device_release(&vgdev->mman.bdev); | 354 | ttm_bo_device_release(&vgdev->mman.bdev); |
416 | virtio_gpu_ttm_global_fini(vgdev); | ||
417 | DRM_INFO("virtio_gpu: ttm finalized\n"); | 355 | DRM_INFO("virtio_gpu: ttm finalized\n"); |
418 | } | 356 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 61a84b958d67..b9c078860a7c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -801,11 +801,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
801 | DRM_INFO("MMIO at 0x%08x size is %u kiB\n", | 801 | DRM_INFO("MMIO at 0x%08x size is %u kiB\n", |
802 | dev_priv->mmio_start, dev_priv->mmio_size / 1024); | 802 | dev_priv->mmio_start, dev_priv->mmio_size / 1024); |
803 | 803 | ||
804 | ret = vmw_ttm_global_init(dev_priv); | ||
805 | if (unlikely(ret != 0)) | ||
806 | goto out_err0; | ||
807 | |||
808 | |||
809 | vmw_master_init(&dev_priv->fbdev_master); | 804 | vmw_master_init(&dev_priv->fbdev_master); |
810 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); | 805 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); |
811 | dev_priv->active_master = &dev_priv->fbdev_master; | 806 | dev_priv->active_master = &dev_priv->fbdev_master; |
@@ -816,7 +811,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
816 | if (unlikely(dev_priv->mmio_virt == NULL)) { | 811 | if (unlikely(dev_priv->mmio_virt == NULL)) { |
817 | ret = -ENOMEM; | 812 | ret = -ENOMEM; |
818 | DRM_ERROR("Failed mapping MMIO.\n"); | 813 | DRM_ERROR("Failed mapping MMIO.\n"); |
819 | goto out_err3; | 814 | goto out_err0; |
820 | } | 815 | } |
821 | 816 | ||
822 | /* Need mmio memory to check for fifo pitchlock cap. */ | 817 | /* Need mmio memory to check for fifo pitchlock cap. */ |
@@ -828,8 +823,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
828 | goto out_err4; | 823 | goto out_err4; |
829 | } | 824 | } |
830 | 825 | ||
831 | dev_priv->tdev = ttm_object_device_init | 826 | dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12, |
832 | (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops); | 827 | &vmw_prime_dmabuf_ops); |
833 | 828 | ||
834 | if (unlikely(dev_priv->tdev == NULL)) { | 829 | if (unlikely(dev_priv->tdev == NULL)) { |
835 | DRM_ERROR("Unable to initialize TTM object management.\n"); | 830 | DRM_ERROR("Unable to initialize TTM object management.\n"); |
@@ -870,7 +865,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
870 | } | 865 | } |
871 | 866 | ||
872 | ret = ttm_bo_device_init(&dev_priv->bdev, | 867 | ret = ttm_bo_device_init(&dev_priv->bdev, |
873 | dev_priv->bo_global_ref.ref.object, | ||
874 | &vmw_bo_driver, | 868 | &vmw_bo_driver, |
875 | dev->anon_inode->i_mapping, | 869 | dev->anon_inode->i_mapping, |
876 | VMWGFX_FILE_PAGE_OFFSET, | 870 | VMWGFX_FILE_PAGE_OFFSET, |
@@ -992,8 +986,6 @@ out_no_device: | |||
992 | ttm_object_device_release(&dev_priv->tdev); | 986 | ttm_object_device_release(&dev_priv->tdev); |
993 | out_err4: | 987 | out_err4: |
994 | memunmap(dev_priv->mmio_virt); | 988 | memunmap(dev_priv->mmio_virt); |
995 | out_err3: | ||
996 | vmw_ttm_global_release(dev_priv); | ||
997 | out_err0: | 989 | out_err0: |
998 | for (i = vmw_res_context; i < vmw_res_max; ++i) | 990 | for (i = vmw_res_context; i < vmw_res_max; ++i) |
999 | idr_destroy(&dev_priv->res_idr[i]); | 991 | idr_destroy(&dev_priv->res_idr[i]); |
@@ -1045,7 +1037,6 @@ static void vmw_driver_unload(struct drm_device *dev) | |||
1045 | memunmap(dev_priv->mmio_virt); | 1037 | memunmap(dev_priv->mmio_virt); |
1046 | if (dev_priv->ctx.staged_bindings) | 1038 | if (dev_priv->ctx.staged_bindings) |
1047 | vmw_binding_state_free(dev_priv->ctx.staged_bindings); | 1039 | vmw_binding_state_free(dev_priv->ctx.staged_bindings); |
1048 | vmw_ttm_global_release(dev_priv); | ||
1049 | 1040 | ||
1050 | for (i = vmw_res_context; i < vmw_res_max; ++i) | 1041 | for (i = vmw_res_context; i < vmw_res_max; ++i) |
1051 | idr_destroy(&dev_priv->res_idr[i]); | 1042 | idr_destroy(&dev_priv->res_idr[i]); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 59f614225bcd..28df788da44e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -417,8 +417,6 @@ enum { | |||
417 | 417 | ||
418 | struct vmw_private { | 418 | struct vmw_private { |
419 | struct ttm_bo_device bdev; | 419 | struct ttm_bo_device bdev; |
420 | struct ttm_bo_global_ref bo_global_ref; | ||
421 | struct drm_global_reference mem_global_ref; | ||
422 | 420 | ||
423 | struct vmw_fifo_state fifo; | 421 | struct vmw_fifo_state fifo; |
424 | 422 | ||
@@ -842,8 +840,6 @@ extern int vmw_fifo_flush(struct vmw_private *dev_priv, | |||
842 | * TTM glue - vmwgfx_ttm_glue.c | 840 | * TTM glue - vmwgfx_ttm_glue.c |
843 | */ | 841 | */ |
844 | 842 | ||
845 | extern int vmw_ttm_global_init(struct vmw_private *dev_priv); | ||
846 | extern void vmw_ttm_global_release(struct vmw_private *dev_priv); | ||
847 | extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); | 843 | extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); |
848 | 844 | ||
849 | /** | 845 | /** |
@@ -1363,7 +1359,7 @@ vmw_bo_reference(struct vmw_buffer_object *buf) | |||
1363 | 1359 | ||
1364 | static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv) | 1360 | static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv) |
1365 | { | 1361 | { |
1366 | return (struct ttm_mem_global *) dev_priv->mem_global_ref.object; | 1362 | return &ttm_mem_glob; |
1367 | } | 1363 | } |
1368 | 1364 | ||
1369 | static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv) | 1365 | static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c index 7b1e5a5cbd2c..154eb09aa91e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | |||
@@ -42,57 +42,3 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma) | |||
42 | dev_priv = vmw_priv(file_priv->minor->dev); | 42 | dev_priv = vmw_priv(file_priv->minor->dev); |
43 | return ttm_bo_mmap(filp, vma, &dev_priv->bdev); | 43 | return ttm_bo_mmap(filp, vma, &dev_priv->bdev); |
44 | } | 44 | } |
45 | |||
46 | static int vmw_ttm_mem_global_init(struct drm_global_reference *ref) | ||
47 | { | ||
48 | DRM_INFO("global init.\n"); | ||
49 | return ttm_mem_global_init(ref->object); | ||
50 | } | ||
51 | |||
52 | static void vmw_ttm_mem_global_release(struct drm_global_reference *ref) | ||
53 | { | ||
54 | ttm_mem_global_release(ref->object); | ||
55 | } | ||
56 | |||
57 | int vmw_ttm_global_init(struct vmw_private *dev_priv) | ||
58 | { | ||
59 | struct drm_global_reference *global_ref; | ||
60 | int ret; | ||
61 | |||
62 | global_ref = &dev_priv->mem_global_ref; | ||
63 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; | ||
64 | global_ref->size = sizeof(struct ttm_mem_global); | ||
65 | global_ref->init = &vmw_ttm_mem_global_init; | ||
66 | global_ref->release = &vmw_ttm_mem_global_release; | ||
67 | |||
68 | ret = drm_global_item_ref(global_ref); | ||
69 | if (unlikely(ret != 0)) { | ||
70 | DRM_ERROR("Failed setting up TTM memory accounting.\n"); | ||
71 | return ret; | ||
72 | } | ||
73 | |||
74 | dev_priv->bo_global_ref.mem_glob = | ||
75 | dev_priv->mem_global_ref.object; | ||
76 | global_ref = &dev_priv->bo_global_ref.ref; | ||
77 | global_ref->global_type = DRM_GLOBAL_TTM_BO; | ||
78 | global_ref->size = sizeof(struct ttm_bo_global); | ||
79 | global_ref->init = &ttm_bo_global_init; | ||
80 | global_ref->release = &ttm_bo_global_release; | ||
81 | ret = drm_global_item_ref(global_ref); | ||
82 | |||
83 | if (unlikely(ret != 0)) { | ||
84 | DRM_ERROR("Failed setting up TTM buffer objects.\n"); | ||
85 | goto out_no_bo; | ||
86 | } | ||
87 | |||
88 | return 0; | ||
89 | out_no_bo: | ||
90 | drm_global_item_unref(&dev_priv->mem_global_ref); | ||
91 | return ret; | ||
92 | } | ||
93 | |||
94 | void vmw_ttm_global_release(struct vmw_private *dev_priv) | ||
95 | { | ||
96 | drm_global_item_unref(&dev_priv->bo_global_ref.ref); | ||
97 | drm_global_item_unref(&dev_priv->mem_global_ref); | ||
98 | } | ||