Diffstat (limited to 'drivers/gpu/drm/i915')
 24 files changed, 389 insertions(+), 241 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index b5475c91e2ef..e9f343b124b0 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2799,6 +2799,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
         MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
         MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
         MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+        MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
         return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 5af11cf1b482..e1675a00df12 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -41,7 +41,7 @@ struct intel_gvt_mpt {
         int (*host_init)(struct device *dev, void *gvt, const void *ops);
         void (*host_exit)(struct device *dev, void *gvt);
         int (*attach_vgpu)(void *vgpu, unsigned long *handle);
-        void (*detach_vgpu)(unsigned long handle);
+        void (*detach_vgpu)(void *vgpu);
         int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
         unsigned long (*from_virt_to_mfn)(void *p);
         int (*enable_page_track)(unsigned long handle, u64 gfn);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index c1072143da1d..dd3dfd00f4e6 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
 {
         unsigned int index;
         u64 virtaddr;
-        unsigned long req_size, pgoff = 0;
+        unsigned long req_size, pgoff, req_start;
         pgprot_t pg_prot;
         struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
 
@@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
         pg_prot = vma->vm_page_prot;
         virtaddr = vma->vm_start;
         req_size = vma->vm_end - vma->vm_start;
-        pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+        pgoff = vma->vm_pgoff &
+                ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+        req_start = pgoff << PAGE_SHIFT;
+
+        if (!intel_vgpu_in_aperture(vgpu, req_start))
+                return -EINVAL;
+        if (req_start + req_size >
+            vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
+                return -EINVAL;
+
+        pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
 
         return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
 }
@@ -1662,9 +1672,21 @@ static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
         return 0;
 }
 
-static void kvmgt_detach_vgpu(unsigned long handle)
+static void kvmgt_detach_vgpu(void *p_vgpu)
 {
-        /* nothing to do here */
+        int i;
+        struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+
+        if (!vgpu->vdev.region)
+                return;
+
+        for (i = 0; i < vgpu->vdev.num_regions; i++)
+                if (vgpu->vdev.region[i].ops->release)
+                        vgpu->vdev.region[i].ops->release(vgpu,
+                                        &vgpu->vdev.region[i]);
+        vgpu->vdev.num_regions = 0;
+        kfree(vgpu->vdev.region);
+        vgpu->vdev.region = NULL;
 }
 
 static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
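The mmap fix above is the important one: previously intel_vgpu_mmap() ignored the caller's offset and always mapped from the vGPU aperture base; now the page offset is derived from vma->vm_pgoff and validated against the aperture before it is translated to a host physical page. A runnable userspace sketch of that style of range check (every constant below is made up for the demo):

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    static bool range_in_aperture(unsigned long ap_offset, unsigned long ap_size,
                                  unsigned long req_start, unsigned long req_size)
    {
        /* both the start and the end of the request must land inside
         * the aperture window assigned to this instance */
        if (req_start < ap_offset)
            return false;
        if (req_start + req_size > ap_offset + ap_size)
            return false;
        return true;
    }

    int main(void)
    {
        unsigned long pgoff = 0x10;    /* masked vma->vm_pgoff */
        unsigned long req_start = pgoff << PAGE_SHIFT;

        printf("ok=%d\n",
               range_in_aperture(0, 1UL << 26, req_start, 1UL << 20));
        return 0;
    }
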
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 67f19992b226..3ed34123d8d1 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -101,7 +101,7 @@ static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
         if (!intel_gvt_host.mpt->detach_vgpu)
                 return;
 
-        intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
+        intel_gvt_host.mpt->detach_vgpu(vgpu);
 }
 
 #define MSI_CAP_CONTROL(offset) (offset + 2)
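The detach_vgpu hook (hypercall.h and mpt.h above) now receives the vGPU object itself rather than the opaque hypervisor handle, which is what lets the KVM backend free the vdev.region array on detach. A minimal ops-table sketch of why the richer argument matters (names are illustrative, not the real MPT interface):

    #include <stdio.h>
    #include <stdlib.h>

    struct vgpu {
        int id;
        void *region;    /* per-instance state to free on detach */
    };

    struct mpt_ops {
        void (*detach_vgpu)(void *vgpu);    /* was: (unsigned long handle) */
    };

    static void kvm_detach_vgpu(void *p_vgpu)
    {
        struct vgpu *vgpu = p_vgpu;

        free(vgpu->region);    /* impossible with only an opaque handle */
        vgpu->region = NULL;
        printf("vgpu %d detached\n", vgpu->id);
    }

    int main(void)
    {
        struct vgpu v = { .id = 1, .region = malloc(64) };
        const struct mpt_ops ops = { .detach_vgpu = kvm_detach_vgpu };

        ops.detach_vgpu(&v);
        return 0;
    }
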
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1ad8c5e1455d..55bb7885e228 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -332,6 +332,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 
         i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
         i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+
+        wa_ctx->indirect_ctx.obj = NULL;
+        wa_ctx->indirect_ctx.shadow_va = NULL;
 }
 
 static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
@@ -356,6 +359,33 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
         return 0;
 }
 
+static int
+intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
+{
+        struct intel_vgpu *vgpu = workload->vgpu;
+        struct intel_vgpu_submission *s = &vgpu->submission;
+        struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
+        struct i915_request *rq;
+        int ret = 0;
+
+        lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+        if (workload->req)
+                goto out;
+
+        rq = i915_request_alloc(engine, shadow_ctx);
+        if (IS_ERR(rq)) {
+                gvt_vgpu_err("fail to allocate gem request\n");
+                ret = PTR_ERR(rq);
+                goto out;
+        }
+        workload->req = i915_request_get(rq);
+out:
+        return ret;
+}
+
 /**
  * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
  * shadow it as well, include ringbuffer,wa_ctx and ctx.
@@ -372,12 +402,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
         struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
         struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
         struct intel_context *ce;
-        struct i915_request *rq;
         int ret;
 
         lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-        if (workload->req)
+        if (workload->shadow)
                 return 0;
 
         ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
@@ -417,22 +446,8 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
                 goto err_shadow;
         }
 
-        rq = i915_request_alloc(engine, shadow_ctx);
-        if (IS_ERR(rq)) {
-                gvt_vgpu_err("fail to allocate gem request\n");
-                ret = PTR_ERR(rq);
-                goto err_shadow;
-        }
-        workload->req = i915_request_get(rq);
-
-        ret = populate_shadow_context(workload);
-        if (ret)
-                goto err_req;
-
+        workload->shadow = true;
         return 0;
-err_req:
-        rq = fetch_and_zero(&workload->req);
-        i915_request_put(rq);
 err_shadow:
         release_shadow_wa_ctx(&workload->wa_ctx);
 err_unpin:
@@ -671,23 +686,31 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
         mutex_lock(&vgpu->vgpu_lock);
         mutex_lock(&dev_priv->drm.struct_mutex);
 
+        ret = intel_gvt_workload_req_alloc(workload);
+        if (ret)
+                goto err_req;
+
         ret = intel_gvt_scan_and_shadow_workload(workload);
         if (ret)
                 goto out;
 
-        ret = prepare_workload(workload);
+        ret = populate_shadow_context(workload);
+        if (ret) {
+                release_shadow_wa_ctx(&workload->wa_ctx);
+                goto out;
+        }
 
+        ret = prepare_workload(workload);
 out:
-        if (ret)
-                workload->status = ret;
-
         if (!IS_ERR_OR_NULL(workload->req)) {
                 gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                               ring_id, workload->req);
                 i915_request_add(workload->req);
                 workload->dispatched = true;
         }
-
+err_req:
+        if (ret)
+                workload->status = ret;
         mutex_unlock(&dev_priv->drm.struct_mutex);
         mutex_unlock(&vgpu->vgpu_lock);
         return ret;
@@ -891,11 +914,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
         list_del_init(&workload->list);
 
-        if (!workload->status) {
-                release_shadow_batch_buffer(workload);
-                release_shadow_wa_ctx(&workload->wa_ctx);
-        }
-
         if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
                 /* if workload->status is not successful means HW GPU
                  * has occurred GPU hang or something wrong with i915/GVT,
@@ -1263,6 +1281,9 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
 {
         struct intel_vgpu_submission *s = &workload->vgpu->submission;
 
+        release_shadow_batch_buffer(workload);
+        release_shadow_wa_ctx(&workload->wa_ctx);
+
         if (workload->shadow_mm)
                 intel_vgpu_mm_put(workload->shadow_mm);
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index ca5529d0e48e..2065cba59aab 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -83,6 +83,7 @@ struct intel_vgpu_workload {
         struct i915_request *req;
         /* if this workload has been dispatched to i915? */
         bool dispatched;
+        bool shadow;    /* if workload has done shadow of guest request */
         int status;
 
         struct intel_vgpu_mm *shadow_mm;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 38dcee1ca062..40a61ef9aac1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -984,8 +984,8 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
         intel_runtime_pm_get(i915);
         gpu = i915_capture_gpu_state(i915);
         intel_runtime_pm_put(i915);
-        if (!gpu)
-                return -ENOMEM;
+        if (IS_ERR(gpu))
+                return PTR_ERR(gpu);
 
         file->private_data = gpu;
         return 0;
@@ -1018,7 +1018,13 @@ i915_error_state_write(struct file *filp,
 
 static int i915_error_state_open(struct inode *inode, struct file *file)
 {
-        file->private_data = i915_first_error_state(inode->i_private);
+        struct i915_gpu_state *error;
+
+        error = i915_first_error_state(inode->i_private);
+        if (IS_ERR(error))
+                return PTR_ERR(error);
+
+        file->private_data = error;
         return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 216f52b744a6..c882ea94172c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1824,6 +1824,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
         return 0;
 }
 
+static inline bool
+__vma_matches(struct vm_area_struct *vma, struct file *filp,
+              unsigned long addr, unsigned long size)
+{
+        if (vma->vm_file != filp)
+                return false;
+
+        return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
+}
+
 /**
  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
  * it is mapped to.
@@ -1882,7 +1892,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                 return -EINTR;
         }
         vma = find_vma(mm, addr);
-        if (vma)
+        if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
                 vma->vm_page_prot =
                         pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
         else
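__vma_matches() closes a race: the ioctl drops the mmap lock after vm_mmap(), so by the time it re-takes it and calls find_vma(), userspace may have remapped that address to something else entirely; the write-combine page protection must only be applied if the vma is still the mapping just created. The revalidation predicate, reduced to a runnable sketch with simplified stand-in types:

    #include <stdbool.h>
    #include <stdio.h>

    struct vma {
        void *file;
        unsigned long start, end;
    };

    static bool vma_matches(const struct vma *vma, const void *filp,
                            unsigned long addr, unsigned long size)
    {
        if (vma->file != filp)
            return false;
        return vma->start == addr && (vma->end - vma->start) == size;
    }

    int main(void)
    {
        int f1, f2;
        struct vma v = { .file = &f1, .start = 0x1000, .end = 0x3000 };

        /* same backing file, address and length: still our mapping */
        printf("ours=%d\n", vma_matches(&v, &f1, 0x1000, 0x2000));
        /* a different file was swapped in meanwhile: leave it alone */
        printf("ours=%d\n", vma_matches(&v, &f2, 0x1000, 0x2000));
        return 0;
    }
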
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index add1fe7aeb93..bd17dd1f5da5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2075,6 +2075,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
 int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
 {
         struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+        int err;
 
         /*
          * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
@@ -2090,9 +2091,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
          * allocator works in address space sizes, so it's multiplied by page
          * size. We allocate at the top of the GTT to avoid fragmentation.
          */
-        return i915_vma_pin(ppgtt->vma,
-                            0, GEN6_PD_ALIGN,
-                            PIN_GLOBAL | PIN_HIGH);
+        err = i915_vma_pin(ppgtt->vma,
+                           0, GEN6_PD_ALIGN,
+                           PIN_GLOBAL | PIN_HIGH);
+        if (err)
+                goto unpin;
+
+        return 0;
+
+unpin:
+        ppgtt->pin_count = 0;
+        return err;
 }
 
 void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
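gen6_ppgtt_pin() no longer returns the raw i915_vma_pin() error directly; it unwinds so the pin accounting stays consistent on failure, resetting pin_count in the error path. A generic sketch of that optimistic-increment-then-unwind style; the names are invented, and the early increment is my inference from the "ppgtt->pin_count = 0" reset visible in the hunk:

    #include <stdio.h>

    struct ppgtt {
        int pin_count;
    };

    static int vma_pin(int fail)
    {
        return fail ? -12 /* -ENOMEM */ : 0;
    }

    static int ppgtt_pin(struct ppgtt *ppgtt, int fail)
    {
        int err;

        if (ppgtt->pin_count++)
            return 0;    /* already pinned */

        err = vma_pin(fail);
        if (err)
            goto unpin;

        return 0;

    unpin:
        ppgtt->pin_count = 0;    /* undo the optimistic increment */
        return err;
    }

    int main(void)
    {
        struct ppgtt p = { 0 };
        int err = ppgtt_pin(&p, 1);

        printf("err=%d pin_count=%d\n", err, p.pin_count);
        return 0;
    }
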
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 07465123c166..3f9ce403c755 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1907,9 +1907,16 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
 {
         struct i915_gpu_state *error;
 
+        /* Check if GPU capture has been disabled */
+        error = READ_ONCE(i915->gpu_error.first_error);
+        if (IS_ERR(error))
+                return error;
+
         error = kzalloc(sizeof(*error), GFP_ATOMIC);
-        if (!error)
-                return NULL;
+        if (!error) {
+                i915_disable_error_state(i915, -ENOMEM);
+                return ERR_PTR(-ENOMEM);
+        }
 
         kref_init(&error->ref);
         error->i915 = i915;
@@ -1945,11 +1952,8 @@ void i915_capture_error_state(struct drm_i915_private *i915,
                 return;
 
         error = i915_capture_gpu_state(i915);
-        if (!error) {
-                DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
-                i915_disable_error_state(i915, -ENOMEM);
+        if (IS_ERR(error))
                 return;
-        }
 
         i915_error_capture_msg(i915, error, engine_mask, error_msg);
         DRM_INFO("%s\n", error->error_msg);
@@ -1987,7 +1991,7 @@ i915_first_error_state(struct drm_i915_private *i915)
 
         spin_lock_irq(&i915->gpu_error.lock);
         error = i915->gpu_error.first_error;
-        if (error)
+        if (!IS_ERR_OR_NULL(error))
                 i915_gpu_state_get(error);
         spin_unlock_irq(&i915->gpu_error.lock);
 
@@ -2000,10 +2004,11 @@ void i915_reset_error_state(struct drm_i915_private *i915)
 
         spin_lock_irq(&i915->gpu_error.lock);
         error = i915->gpu_error.first_error;
-        i915->gpu_error.first_error = NULL;
+        if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
+                i915->gpu_error.first_error = NULL;
         spin_unlock_irq(&i915->gpu_error.lock);
 
-        if (!IS_ERR(error))
+        if (!IS_ERR_OR_NULL(error))
                 i915_gpu_state_put(error);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index d6c8f8fdfda5..017fc602a10e 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -594,7 +594,8 @@ static void i915_pmu_enable(struct perf_event *event)
          * Update the bitmask of enabled events and increment
          * the event reference counter.
          */
-        GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
+        BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS);
+        GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
         GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
         i915->pmu.enable |= BIT_ULL(bit);
         i915->pmu.enable_count[bit]++;
@@ -615,11 +616,16 @@ static void i915_pmu_enable(struct perf_event *event)
                 engine = intel_engine_lookup_user(i915,
                                                   engine_event_class(event),
                                                   engine_event_instance(event));
-                GEM_BUG_ON(!engine);
-                engine->pmu.enable |= BIT(sample);
 
-                GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
+                BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
+                             I915_ENGINE_SAMPLE_COUNT);
+                BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
+                             I915_ENGINE_SAMPLE_COUNT);
+                GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
+                GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
                 GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
+
+                engine->pmu.enable |= BIT(sample);
                 engine->pmu.enable_count[sample]++;
         }
 
@@ -649,9 +655,11 @@ static void i915_pmu_disable(struct perf_event *event)
                 engine = intel_engine_lookup_user(i915,
                                                   engine_event_class(event),
                                                   engine_event_instance(event));
-                GEM_BUG_ON(!engine);
-                GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
+
+                GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
+                GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
                 GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
+
                 /*
                  * Decrement the reference count and clear the enabled
                  * bitmask when the last listener on an event goes away.
@@ -660,7 +668,7 @@ static void i915_pmu_disable(struct perf_event *event)
                 engine->pmu.enable &= ~BIT(sample);
         }
 
-        GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
+        GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count));
         GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
         /*
          * Decrement the reference count and clear the enabled
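The PMU hunks replace range checks against the I915_PMU_MASK_BITS/I915_PMU_SAMPLE_BITS constants with bounds derived from the arrays themselves, plus BUILD_BUG_ON() compile-time proofs that the arrays are sized by the new I915_ENGINE_SAMPLE_COUNT. A userspace equivalent using C11 _Static_assert; the count of 3 assumes the sema sample is the last engine sample type, per the define added in i915_pmu.h below:

    #include <stdio.h>

    #define ENGINE_SAMPLE_COUNT 3    /* busy, wait, sema */

    struct engine_pmu {
        unsigned int enable_count[ENGINE_SAMPLE_COUNT];
        unsigned long long sample[ENGINE_SAMPLE_COUNT];
    };

    /* fails to compile, instead of corrupting memory at runtime, if the
     * arrays ever stop covering every sample type */
    _Static_assert(sizeof(((struct engine_pmu *)0)->enable_count) /
                   sizeof(unsigned int) == ENGINE_SAMPLE_COUNT,
                   "enable_count[] must have one slot per sample type");

    int main(void)
    {
        printf("arrays hold %d sample types\n", ENGINE_SAMPLE_COUNT);
        return 0;
    }
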
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 7f164ca3db12..b3728c5f13e7 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -31,6 +31,8 @@ enum {
         ((1 << I915_PMU_SAMPLE_BITS) + \
          (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0)))
 
+#define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1)
+
 struct i915_pmu_sample {
         u64 cur;
 };
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0a7d60509ca7..067054cf4a86 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1790,7 +1790,7 @@ enum i915_power_well_id {
 #define _CNL_PORT_TX_C_LN0_OFFSET       0x162C40
 #define _CNL_PORT_TX_D_LN0_OFFSET       0x162E40
 #define _CNL_PORT_TX_F_LN0_OFFSET       0x162840
-#define _CNL_PORT_TX_DW_GRP(port, dw)   (_PICK((port), \
+#define _CNL_PORT_TX_DW_GRP(dw, port)   (_PICK((port), \
                                                _CNL_PORT_TX_AE_GRP_OFFSET, \
                                                _CNL_PORT_TX_B_GRP_OFFSET, \
                                                _CNL_PORT_TX_B_GRP_OFFSET, \
@@ -1798,7 +1798,7 @@ enum i915_power_well_id {
                                                _CNL_PORT_TX_AE_GRP_OFFSET, \
                                                _CNL_PORT_TX_F_GRP_OFFSET) + \
                                                4 * (dw))
-#define _CNL_PORT_TX_DW_LN0(port, dw)   (_PICK((port), \
+#define _CNL_PORT_TX_DW_LN0(dw, port)   (_PICK((port), \
                                                _CNL_PORT_TX_AE_LN0_OFFSET, \
                                                _CNL_PORT_TX_B_LN0_OFFSET, \
                                                _CNL_PORT_TX_B_LN0_OFFSET, \
@@ -1834,9 +1834,9 @@ enum i915_power_well_id {
 
 #define _CNL_PORT_TX_DW4_LN0_AE         0x162450
 #define _CNL_PORT_TX_DW4_LN1_AE         0x1624D0
-#define CNL_PORT_TX_DW4_GRP(port)       _MMIO(_CNL_PORT_TX_DW_GRP((port), 4))
-#define CNL_PORT_TX_DW4_LN0(port)       _MMIO(_CNL_PORT_TX_DW_LN0((port), 4))
-#define CNL_PORT_TX_DW4_LN(port, ln)    _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
+#define CNL_PORT_TX_DW4_GRP(port)       _MMIO(_CNL_PORT_TX_DW_GRP(4, (port)))
+#define CNL_PORT_TX_DW4_LN0(port)       _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)))
+#define CNL_PORT_TX_DW4_LN(port, ln)    _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
                                               ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
                                                        _CNL_PORT_TX_DW4_LN0_AE)))
 #define ICL_PORT_TX_DW4_AUX(port)       _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
@@ -1864,8 +1864,12 @@ enum i915_power_well_id {
 #define RTERM_SELECT(x)                 ((x) << 3)
 #define RTERM_SELECT_MASK               (0x7 << 3)
 
-#define CNL_PORT_TX_DW7_GRP(port)       _MMIO(_CNL_PORT_TX_DW_GRP((port), 7))
-#define CNL_PORT_TX_DW7_LN0(port)       _MMIO(_CNL_PORT_TX_DW_LN0((port), 7))
+#define CNL_PORT_TX_DW7_GRP(port)       _MMIO(_CNL_PORT_TX_DW_GRP(7, (port)))
+#define CNL_PORT_TX_DW7_LN0(port)       _MMIO(_CNL_PORT_TX_DW_LN0(7, (port)))
+#define ICL_PORT_TX_DW7_AUX(port)       _MMIO(_ICL_PORT_TX_DW_AUX(7, port))
+#define ICL_PORT_TX_DW7_GRP(port)       _MMIO(_ICL_PORT_TX_DW_GRP(7, port))
+#define ICL_PORT_TX_DW7_LN0(port)       _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
+#define ICL_PORT_TX_DW7_LN(port, ln)    _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
 #define N_SCALAR(x)                     ((x) << 24)
 #define N_SCALAR_MASK                   (0x7F << 24)
 
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 535caebd9813..c0cfe7ae2ba5 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -521,7 +521,9 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
         ssize_t ret;
 
         gpu = i915_first_error_state(i915);
-        if (gpu) {
+        if (IS_ERR(gpu)) {
+                ret = PTR_ERR(gpu);
+        } else if (gpu) {
                 ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
                 i915_gpu_state_put(gpu);
         } else {
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f3e1d6a0b7dd..7edce1b7b348 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -494,103 +494,58 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
         { 0x2, 0x7F, 0x3F, 0x00, 0x00 },        /* 400   400      0.0   */
 };
 
-struct icl_combo_phy_ddi_buf_trans {
-        u32 dw2_swing_select;
-        u32 dw2_swing_scalar;
-        u32 dw4_scaling;
-};
-
-/* Voltage Swing Programming for VccIO 0.85V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = {
-                                /* Voltage mV  db    */
-        { 0x2, 0x98, 0x0018 },  /* 400         0.0   */
-        { 0x2, 0x98, 0x3015 },  /* 400         3.5   */
-        { 0x2, 0x98, 0x6012 },  /* 400         6.0   */
-        { 0x2, 0x98, 0x900F },  /* 400         9.5   */
-        { 0xB, 0x70, 0x0018 },  /* 600         0.0   */
-        { 0xB, 0x70, 0x3015 },  /* 600         3.5   */
-        { 0xB, 0x70, 0x6012 },  /* 600         6.0   */
-        { 0x5, 0x00, 0x0018 },  /* 800         0.0   */
-        { 0x5, 0x00, 0x3015 },  /* 800         3.5   */
-        { 0x6, 0x98, 0x0018 },  /* 1200        0.0   */
-};
-
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 0.85V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = {
-                                /* Voltage mV  db    */
-        { 0x0, 0x00, 0x00 },    /* 200         0.0   */
-        { 0x0, 0x00, 0x00 },    /* 200         1.5   */
-        { 0x0, 0x00, 0x00 },    /* 200         4.0   */
-        { 0x0, 0x00, 0x00 },    /* 200         6.0   */
-        { 0x0, 0x00, 0x00 },    /* 250         0.0   */
-        { 0x0, 0x00, 0x00 },    /* 250         1.5   */
-        { 0x0, 0x00, 0x00 },    /* 250         4.0   */
-        { 0x0, 0x00, 0x00 },    /* 300         0.0   */
-        { 0x0, 0x00, 0x00 },    /* 300         1.5   */
-        { 0x0, 0x00, 0x00 },    /* 350         0.0   */
-};
-
-/* Voltage Swing Programming for VccIO 0.95V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = {
-                                /* Voltage mV  db    */
-        { 0x2, 0x98, 0x0018 },  /* 400         0.0   */
-        { 0x2, 0x98, 0x3015 },  /* 400         3.5   */
-        { 0x2, 0x98, 0x6012 },  /* 400         6.0   */
-        { 0x2, 0x98, 0x900F },  /* 400         9.5   */
-        { 0x4, 0x98, 0x0018 },  /* 600         0.0   */
-        { 0x4, 0x98, 0x3015 },  /* 600         3.5   */
-        { 0x4, 0x98, 0x6012 },  /* 600         6.0   */
-        { 0x5, 0x76, 0x0018 },  /* 800         0.0   */
-        { 0x5, 0x76, 0x3015 },  /* 800         3.5   */
-        { 0x6, 0x98, 0x0018 },  /* 1200        0.0   */
+/* icl_combo_phy_ddi_translations */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = {
+                                                /* NT mV Trans mV db    */
+        { 0xA, 0x35, 0x3F, 0x00, 0x00 },        /* 350   350      0.0   */
+        { 0xA, 0x4F, 0x37, 0x00, 0x08 },        /* 350   500      3.1   */
+        { 0xC, 0x71, 0x2F, 0x00, 0x10 },        /* 350   700      6.0   */
+        { 0x6, 0x7F, 0x2B, 0x00, 0x14 },        /* 350   900      8.2   */
+        { 0xA, 0x4C, 0x3F, 0x00, 0x00 },        /* 500   500      0.0   */
+        { 0xC, 0x73, 0x34, 0x00, 0x0B },        /* 500   700      2.9   */
+        { 0x6, 0x7F, 0x2F, 0x00, 0x10 },        /* 500   900      5.1   */
+        { 0xC, 0x6C, 0x3C, 0x00, 0x03 },        /* 650   700      0.6   */
+        { 0x6, 0x7F, 0x35, 0x00, 0x0A },        /* 600   900      3.5   */
+        { 0x6, 0x7F, 0x3F, 0x00, 0x00 },        /* 900   900      0.0   */
 };
 
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 0.95V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = {
-                                /* Voltage mV  db    */
-        { 0x0, 0x00, 0x00 },    /* 200         0.0   */
-        { 0x0, 0x00, 0x00 },    /* 200         1.5   */
-        { 0x0, 0x00, 0x00 },    /* 200         4.0   */
-        { 0x0, 0x00, 0x00 },    /* 200         6.0   */
-        { 0x0, 0x00, 0x00 },    /* 250         0.0   */
-        { 0x0, 0x00, 0x00 },    /* 250         1.5   */
-        { 0x0, 0x00, 0x00 },    /* 250         4.0   */
-        { 0x0, 0x00, 0x00 },    /* 300         0.0   */
-        { 0x0, 0x00, 0x00 },    /* 300         1.5   */
-        { 0x0, 0x00, 0x00 },    /* 350         0.0   */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = {
+                                                /* NT mV Trans mV db    */
+        { 0x0, 0x7F, 0x3F, 0x00, 0x00 },        /* 200   200      0.0   */
+        { 0x8, 0x7F, 0x38, 0x00, 0x07 },        /* 200   250      1.9   */
+        { 0x1, 0x7F, 0x33, 0x00, 0x0C },        /* 200   300      3.5   */
+        { 0x9, 0x7F, 0x31, 0x00, 0x0E },        /* 200   350      4.9   */
+        { 0x8, 0x7F, 0x3F, 0x00, 0x00 },        /* 250   250      0.0   */
+        { 0x1, 0x7F, 0x38, 0x00, 0x07 },        /* 250   300      1.6   */
+        { 0x9, 0x7F, 0x35, 0x00, 0x0A },        /* 250   350      2.9   */
+        { 0x1, 0x7F, 0x3F, 0x00, 0x00 },        /* 300   300      0.0   */
+        { 0x9, 0x7F, 0x38, 0x00, 0x07 },        /* 300   350      1.3   */
+        { 0x9, 0x7F, 0x3F, 0x00, 0x00 },        /* 350   350      0.0   */
 };
 
-/* Voltage Swing Programming for VccIO 1.05V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = {
-                                /* Voltage mV  db    */
-        { 0x2, 0x98, 0x0018 },  /* 400         0.0   */
-        { 0x2, 0x98, 0x3015 },  /* 400         3.5   */
-        { 0x2, 0x98, 0x6012 },  /* 400         6.0   */
-        { 0x2, 0x98, 0x900F },  /* 400         9.5   */
-        { 0x4, 0x98, 0x0018 },  /* 600         0.0   */
-        { 0x4, 0x98, 0x3015 },  /* 600         3.5   */
-        { 0x4, 0x98, 0x6012 },  /* 600         6.0   */
-        { 0x5, 0x71, 0x0018 },  /* 800         0.0   */
-        { 0x5, 0x71, 0x3015 },  /* 800         3.5   */
-        { 0x6, 0x98, 0x0018 },  /* 1200        0.0   */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = {
+                                                /* NT mV Trans mV db    */
+        { 0xA, 0x35, 0x3F, 0x00, 0x00 },        /* 350   350      0.0   */
+        { 0xA, 0x4F, 0x37, 0x00, 0x08 },        /* 350   500      3.1   */
+        { 0xC, 0x71, 0x2F, 0x00, 0x10 },        /* 350   700      6.0   */
+        { 0x6, 0x7F, 0x2B, 0x00, 0x14 },        /* 350   900      8.2   */
+        { 0xA, 0x4C, 0x3F, 0x00, 0x00 },        /* 500   500      0.0   */
+        { 0xC, 0x73, 0x34, 0x00, 0x0B },        /* 500   700      2.9   */
+        { 0x6, 0x7F, 0x2F, 0x00, 0x10 },        /* 500   900      5.1   */
+        { 0xC, 0x6C, 0x3C, 0x00, 0x03 },        /* 650   700      0.6   */
+        { 0x6, 0x7F, 0x35, 0x00, 0x0A },        /* 600   900      3.5   */
+        { 0x6, 0x7F, 0x3F, 0x00, 0x00 },        /* 900   900      0.0   */
 };
 
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 1.05V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = {
-                                /* Voltage mV  db    */
-        { 0x0, 0x00, 0x00 },    /* 200         0.0   */
-        { 0x0, 0x00, 0x00 },    /* 200         1.5   */
-        { 0x0, 0x00, 0x00 },    /* 200         4.0   */
-        { 0x0, 0x00, 0x00 },    /* 200         6.0   */
-        { 0x0, 0x00, 0x00 },    /* 250         0.0   */
-        { 0x0, 0x00, 0x00 },    /* 250         1.5   */
-        { 0x0, 0x00, 0x00 },    /* 250         4.0   */
-        { 0x0, 0x00, 0x00 },    /* 300         0.0   */
-        { 0x0, 0x00, 0x00 },    /* 300         1.5   */
-        { 0x0, 0x00, 0x00 },    /* 350         0.0   */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
+                                                /* NT mV Trans mV db    */
+        { 0xA, 0x60, 0x3F, 0x00, 0x00 },        /* 450   450      0.0   */
+        { 0xB, 0x73, 0x36, 0x00, 0x09 },        /* 450   650      3.2   */
+        { 0x6, 0x7F, 0x31, 0x00, 0x0E },        /* 450   850      5.5   */
+        { 0xB, 0x73, 0x3F, 0x00, 0x00 },        /* 650   650      0.0 ALS */
+        { 0x6, 0x7F, 0x37, 0x00, 0x08 },        /* 650   850      2.3   */
+        { 0x6, 0x7F, 0x3F, 0x00, 0x00 },        /* 850   850      0.0   */
+        { 0x6, 0x7F, 0x35, 0x00, 0x0A },        /* 600   850      3.0   */
 };
 
 struct icl_mg_phy_ddi_buf_trans {
@@ -871,43 +826,23 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
         }
 }
 
-static const struct icl_combo_phy_ddi_buf_trans *
+static const struct cnl_ddi_buf_trans *
 icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
-                        int type, int *n_entries)
+                        int type, int rate, int *n_entries)
 {
-        u32 voltage = I915_READ(ICL_PORT_COMP_DW3(port)) & VOLTAGE_INFO_MASK;
-
-        if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
-                switch (voltage) {
-                case VOLTAGE_INFO_0_85V:
-                        *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_85V);
-                        return icl_combo_phy_ddi_translations_edp_0_85V;
-                case VOLTAGE_INFO_0_95V:
-                        *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_95V);
-                        return icl_combo_phy_ddi_translations_edp_0_95V;
-                case VOLTAGE_INFO_1_05V:
-                        *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_1_05V);
-                        return icl_combo_phy_ddi_translations_edp_1_05V;
-                default:
-                        MISSING_CASE(voltage);
-                        return NULL;
-                }
-        } else {
-                switch (voltage) {
-                case VOLTAGE_INFO_0_85V:
-                        *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_85V);
-                        return icl_combo_phy_ddi_translations_dp_hdmi_0_85V;
-                case VOLTAGE_INFO_0_95V:
-                        *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_95V);
-                        return icl_combo_phy_ddi_translations_dp_hdmi_0_95V;
-                case VOLTAGE_INFO_1_05V:
-                        *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_1_05V);
-                        return icl_combo_phy_ddi_translations_dp_hdmi_1_05V;
-                default:
-                        MISSING_CASE(voltage);
-                        return NULL;
-                }
+        if (type == INTEL_OUTPUT_HDMI) {
+                *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
+                return icl_combo_phy_ddi_translations_hdmi;
+        } else if (rate > 540000 && type == INTEL_OUTPUT_EDP) {
+                *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
+                return icl_combo_phy_ddi_translations_edp_hbr3;
+        } else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
+                *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
+                return icl_combo_phy_ddi_translations_edp_hbr2;
         }
+
+        *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
+        return icl_combo_phy_ddi_translations_dp_hbr2;
 }
 
 static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
@@ -918,8 +853,8 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
 
         if (IS_ICELAKE(dev_priv)) {
                 if (intel_port_is_combophy(dev_priv, port))
-                        icl_get_combo_buf_trans(dev_priv, port,
-                                                INTEL_OUTPUT_HDMI, &n_entries);
+                        icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
+                                                0, &n_entries);
                 else
                         n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
                 default_entry = n_entries - 1;
@@ -1086,7 +1021,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
                         return DDI_CLK_SEL_TBT_810;
                 default:
                         MISSING_CASE(clock);
-                        break;
+                        return DDI_CLK_SEL_NONE;
                 }
         case DPLL_ID_ICL_MGPLL1:
         case DPLL_ID_ICL_MGPLL2:
@@ -2275,13 +2210,14 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
 u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
 {
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
         enum port port = encoder->port;
         int n_entries;
 
         if (IS_ICELAKE(dev_priv)) {
                 if (intel_port_is_combophy(dev_priv, port))
                         icl_get_combo_buf_trans(dev_priv, port, encoder->type,
-                                                &n_entries);
+                                                intel_dp->link_rate, &n_entries);
                 else
                         n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
         } else if (IS_CANNONLAKE(dev_priv)) {
@@ -2462,14 +2398,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
 }
 
 static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
-                                         u32 level, enum port port, int type)
+                                         u32 level, enum port port, int type,
+                                         int rate)
 {
-        const struct icl_combo_phy_ddi_buf_trans *ddi_translations = NULL;
+        const struct cnl_ddi_buf_trans *ddi_translations = NULL;
         u32 n_entries, val;
         int ln;
 
         ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type,
-                                                   &n_entries);
+                                                   rate, &n_entries);
         if (!ddi_translations)
                 return;
 
@@ -2478,34 +2415,23 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
                 level = n_entries - 1;
         }
 
-        /* Set PORT_TX_DW5 Rterm Sel to 110b. */
+        /* Set PORT_TX_DW5 */
         val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
-        val &= ~RTERM_SELECT_MASK;
+        val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
+                 TAP2_DISABLE | TAP3_DISABLE);
+        val |= SCALING_MODE_SEL(0x2);
         val |= RTERM_SELECT(0x6);
-        I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
-
-        /* Program PORT_TX_DW5 */
-        val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
-        /* Set DisableTap2 and DisableTap3 if MIPI DSI
-         * Clear DisableTap2 and DisableTap3 for all other Ports
-         */
-        if (type == INTEL_OUTPUT_DSI) {
-                val |= TAP2_DISABLE;
-                val |= TAP3_DISABLE;
-        } else {
-                val &= ~TAP2_DISABLE;
-                val &= ~TAP3_DISABLE;
-        }
+        val |= TAP3_DISABLE;
         I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
 
         /* Program PORT_TX_DW2 */
         val = I915_READ(ICL_PORT_TX_DW2_LN0(port));
         val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
                  RCOMP_SCALAR_MASK);
-        val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_select);
-        val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_select);
+        val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
+        val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
         /* Program Rcomp scalar for every table entry */
-        val |= RCOMP_SCALAR(ddi_translations[level].dw2_swing_scalar);
+        val |= RCOMP_SCALAR(0x98);
         I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val);
 
         /* Program PORT_TX_DW4 */
@@ -2514,9 +2440,17 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
                 val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
                 val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
                          CURSOR_COEFF_MASK);
-                val |= ddi_translations[level].dw4_scaling;
+                val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
+                val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
+                val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
                 I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
         }
+
+        /* Program PORT_TX_DW7 */
+        val = I915_READ(ICL_PORT_TX_DW7_LN0(port));
+        val &= ~N_SCALAR_MASK;
+        val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
+        I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val);
 }
 
 static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2581,7 +2515,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
         I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
 
         /* 5. Program swing and de-emphasis */
-        icl_ddi_combo_vswing_program(dev_priv, level, port, type);
+        icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate);
 
         /* 6. Set training enable to trigger update */
         val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
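The net effect of the intel_ddi.c changes: the Icelake combo-PHY buffer translation tables are reissued in the cnl_ddi_buf_trans format, and the table is now selected by output type and link rate instead of the VccIO voltage read from ICL_PORT_COMP_DW3. The selection logic, extracted into a runnable sketch (table contents elided, names shortened):

    #include <stdio.h>

    enum output_type { OUTPUT_DP, OUTPUT_EDP, OUTPUT_HDMI };

    static const char *icl_combo_table(enum output_type type, int link_rate,
                                       int low_vswing)
    {
        if (type == OUTPUT_HDMI)
            return "hdmi";
        else if (link_rate > 540000 && type == OUTPUT_EDP)
            return "edp_hbr3";
        else if (type == OUTPUT_EDP && low_vswing)
            return "edp_hbr2";

        return "dp_hbr2";    /* DP, and eDP without low vswing */
    }

    int main(void)
    {
        printf("%s\n", icl_combo_table(OUTPUT_EDP, 810000, 1)); /* edp_hbr3 */
        printf("%s\n", icl_combo_table(OUTPUT_DP, 270000, 0));  /* dp_hbr2 */
        return 0;
    }
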
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3da9c0f9e948..248128126422 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15415,16 +15415,45 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
         }
 }
 
+static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
+{
+        struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+        /*
+         * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
+         * the hardware when a high res displays plugged in. DPLL P
+         * divider is zero, and the pipe timings are bonkers. We'll
+         * try to disable everything in that case.
+         *
+         * FIXME would be nice to be able to sanitize this state
+         * without several WARNs, but for now let's take the easy
+         * road.
+         */
+        return IS_GEN6(dev_priv) &&
+               crtc_state->base.active &&
+               crtc_state->shared_dpll &&
+               crtc_state->port_clock == 0;
+}
+
 static void intel_sanitize_encoder(struct intel_encoder *encoder)
 {
         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
         struct intel_connector *connector;
+        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+        struct intel_crtc_state *crtc_state = crtc ?
+                to_intel_crtc_state(crtc->base.state) : NULL;
 
         /* We need to check both for a crtc link (meaning that the
          * encoder is active and trying to read from a pipe) and the
          * pipe itself being active. */
-        bool has_active_crtc = encoder->base.crtc &&
-                to_intel_crtc(encoder->base.crtc)->active;
+        bool has_active_crtc = crtc_state &&
+                crtc_state->base.active;
+
+        if (crtc_state && has_bogus_dpll_config(crtc_state)) {
+                DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
+                              pipe_name(crtc->pipe));
+                has_active_crtc = false;
+        }
 
         connector = intel_encoder_find_connector(encoder);
         if (connector && !has_active_crtc) {
@@ -15435,16 +15464,25 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
                 /* Connector is active, but has no active pipe. This is
                  * fallout from our resume register restoring. Disable
                  * the encoder manually again. */
-                if (encoder->base.crtc) {
-                        struct drm_crtc_state *crtc_state = encoder->base.crtc->state;
+                if (crtc_state) {
+                        struct drm_encoder *best_encoder;
 
                         DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
                                       encoder->base.base.id,
                                       encoder->base.name);
+
+                        /* avoid oopsing in case the hooks consult best_encoder */
+                        best_encoder = connector->base.state->best_encoder;
+                        connector->base.state->best_encoder = &encoder->base;
+
                         if (encoder->disable)
-                                encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
+                                encoder->disable(encoder, crtc_state,
+                                                 connector->base.state);
                         if (encoder->post_disable)
-                                encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
+                                encoder->post_disable(encoder, crtc_state,
+                                                      connector->base.state);
+
+                        connector->base.state->best_encoder = best_encoder;
                 }
                 encoder->base.crtc = NULL;
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index fdd2cbc56fa3..22a74608c6e4 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -304,9 +304,11 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
 static int icl_max_source_rate(struct intel_dp *intel_dp)
 {
         struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+        struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
         enum port port = dig_port->base.port;
 
-        if (port == PORT_B)
+        if (intel_port_is_combophy(dev_priv, port) &&
+            !intel_dp_is_edp(intel_dp))
                 return 540000;
 
         return 810000;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f94a04b4ad87..e9ddeaf05a14 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -209,6 +209,16 @@ struct intel_fbdev {
         unsigned long vma_flags;
         async_cookie_t cookie;
         int preferred_bpp;
+
+        /* Whether or not fbdev hpd processing is temporarily suspended */
+        bool hpd_suspended : 1;
+        /* Set when a hotplug was received while HPD processing was
+         * suspended
+         */
+        bool hpd_waiting : 1;
+
+        /* Protects hpd_suspended */
+        struct mutex hpd_lock;
 };
 
 struct intel_encoder {
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index fb5bb5b32a60..4ee16b264dbe 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
@@ -336,8 +336,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, | |||
336 | bool *enabled, int width, int height) | 336 | bool *enabled, int width, int height) |
337 | { | 337 | { |
338 | struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); | 338 | struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); |
339 | unsigned long conn_configured, conn_seq, mask; | ||
340 | unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); | 339 | unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); |
340 | unsigned long conn_configured, conn_seq; | ||
341 | int i, j; | 341 | int i, j; |
342 | bool *save_enabled; | 342 | bool *save_enabled; |
343 | bool fallback = true, ret = true; | 343 | bool fallback = true, ret = true; |
@@ -355,10 +355,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, | |||
355 | drm_modeset_backoff(&ctx); | 355 | drm_modeset_backoff(&ctx); |
356 | 356 | ||
357 | memcpy(save_enabled, enabled, count); | 357 | memcpy(save_enabled, enabled, count); |
358 | mask = GENMASK(count - 1, 0); | 358 | conn_seq = GENMASK(count - 1, 0); |
359 | conn_configured = 0; | 359 | conn_configured = 0; |
360 | retry: | 360 | retry: |
361 | conn_seq = conn_configured; | ||
362 | for (i = 0; i < count; i++) { | 361 | for (i = 0; i < count; i++) { |
363 | struct drm_fb_helper_connector *fb_conn; | 362 | struct drm_fb_helper_connector *fb_conn; |
364 | struct drm_connector *connector; | 363 | struct drm_connector *connector; |
@@ -371,7 +370,8 @@ retry: | |||
371 | if (conn_configured & BIT(i)) | 370 | if (conn_configured & BIT(i)) |
372 | continue; | 371 | continue; |
373 | 372 | ||
374 | if (conn_seq == 0 && !connector->has_tile) | 373 | /* First pass, only consider tiled connectors */ |
374 | if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile) | ||
375 | continue; | 375 | continue; |
376 | 376 | ||
377 | if (connector->status == connector_status_connected) | 377 | if (connector->status == connector_status_connected) |
@@ -475,8 +475,10 @@ retry: | |||
475 | conn_configured |= BIT(i); | 475 | conn_configured |= BIT(i); |
476 | } | 476 | } |
477 | 477 | ||
478 | if ((conn_configured & mask) != mask && conn_configured != conn_seq) | 478 | if (conn_configured != conn_seq) { /* repeat until no more are found */ |
479 | conn_seq = conn_configured; | ||
479 | goto retry; | 480 | goto retry; |
481 | } | ||
480 | 482 | ||
481 | /* | 483 | /* |
482 | * If the BIOS didn't enable everything it could, fall back to have the | 484 | * If the BIOS didn't enable everything it could, fall back to have the |
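The conn_seq rework above turns the retry logic into a plain fixed-point iteration: conn_seq holds the previous pass's result, seeded with the full connector mask so the first (tiled-connectors-only) pass remains distinguishable, and the loop repeats until a pass configures nothing new. A compilable userspace model of that convergence, with is_tiled() and try_configure() as toy stand-ins for the per-connector work:

#include <stdbool.h>

#define BITS_PER_LONG ((int)(8 * sizeof(unsigned long)))
#define GENMASK(h, l) \
        ((~0UL << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

static bool is_tiled(int i) { return i == 0; }          /* toy input */
static bool try_configure(int i) { (void)i; return true; }

static unsigned long configure_all(int count)
{
        unsigned long conn_configured = 0;
        unsigned long conn_seq = GENMASK(count - 1, 0);
        int i;

retry:
        for (i = 0; i < count; i++) {
                if (conn_configured & (1UL << i))
                        continue;
                /* first pass only: conn_seq still holds the full mask */
                if (conn_seq == GENMASK(count - 1, 0) && !is_tiled(i))
                        continue;
                if (try_configure(i))
                        conn_configured |= 1UL << i;
        }
        /* repeat until a pass adds no newly configured connectors */
        if (conn_configured != conn_seq) {
                conn_seq = conn_configured;
                goto retry;
        }
        return conn_configured;
}

Folding the old `mask` and `conn_seq = conn_configured` bookkeeping into one variable also removes the case where the loop could exit with unconfigured connectors still making progress.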
@@ -679,6 +681,7 @@ int intel_fbdev_init(struct drm_device *dev) | |||
679 | if (ifbdev == NULL) | 681 | if (ifbdev == NULL) |
680 | return -ENOMEM; | 682 | return -ENOMEM; |
681 | 683 | ||
684 | mutex_init(&ifbdev->hpd_lock); | ||
682 | drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); | 685 | drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); |
683 | 686 | ||
684 | if (!intel_fbdev_init_bios(dev, ifbdev)) | 687 | if (!intel_fbdev_init_bios(dev, ifbdev)) |
@@ -752,6 +755,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv) | |||
752 | intel_fbdev_destroy(ifbdev); | 755 | intel_fbdev_destroy(ifbdev); |
753 | } | 756 | } |
754 | 757 | ||
758 | /* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD | ||
759 | * processing, fbdev will perform a full connector reprobe if a hotplug event | ||
760 | * was received while HPD was suspended. | ||
761 | */ | ||
762 | static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state) | ||
763 | { | ||
764 | bool send_hpd = false; | ||
765 | |||
766 | mutex_lock(&ifbdev->hpd_lock); | ||
767 | ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED; | ||
768 | send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting; | ||
769 | ifbdev->hpd_waiting = false; | ||
770 | mutex_unlock(&ifbdev->hpd_lock); | ||
771 | |||
772 | if (send_hpd) { | ||
773 | DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n"); | ||
774 | drm_fb_helper_hotplug_event(&ifbdev->helper); | ||
775 | } | ||
776 | } | ||
777 | |||
755 | void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) | 778 | void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) |
756 | { | 779 | { |
757 | struct drm_i915_private *dev_priv = to_i915(dev); | 780 | struct drm_i915_private *dev_priv = to_i915(dev); |
@@ -773,6 +796,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous | |||
773 | */ | 796 | */ |
774 | if (state != FBINFO_STATE_RUNNING) | 797 | if (state != FBINFO_STATE_RUNNING) |
775 | flush_work(&dev_priv->fbdev_suspend_work); | 798 | flush_work(&dev_priv->fbdev_suspend_work); |
799 | |||
776 | console_lock(); | 800 | console_lock(); |
777 | } else { | 801 | } else { |
778 | /* | 802 | /* |
@@ -800,17 +824,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous | |||
800 | 824 | ||
801 | drm_fb_helper_set_suspend(&ifbdev->helper, state); | 825 | drm_fb_helper_set_suspend(&ifbdev->helper, state); |
802 | console_unlock(); | 826 | console_unlock(); |
827 | |||
828 | intel_fbdev_hpd_set_suspend(ifbdev, state); | ||
803 | } | 829 | } |
804 | 830 | ||
805 | void intel_fbdev_output_poll_changed(struct drm_device *dev) | 831 | void intel_fbdev_output_poll_changed(struct drm_device *dev) |
806 | { | 832 | { |
807 | struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; | 833 | struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; |
834 | bool send_hpd; | ||
808 | 835 | ||
809 | if (!ifbdev) | 836 | if (!ifbdev) |
810 | return; | 837 | return; |
811 | 838 | ||
812 | intel_fbdev_sync(ifbdev); | 839 | intel_fbdev_sync(ifbdev); |
813 | if (ifbdev->vma || ifbdev->helper.deferred_setup) | 840 | |
841 | mutex_lock(&ifbdev->hpd_lock); | ||
842 | send_hpd = !ifbdev->hpd_suspended; | ||
843 | ifbdev->hpd_waiting = true; | ||
844 | mutex_unlock(&ifbdev->hpd_lock); | ||
845 | |||
846 | if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup)) | ||
814 | drm_fb_helper_hotplug_event(&ifbdev->helper); | 847 | drm_fb_helper_hotplug_event(&ifbdev->helper); |
815 | } | 848 | } |
816 | 849 | ||
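Taken together, the hunks above wire the hpd_lock/hpd_suspended/hpd_waiting fields added to struct intel_fbdev into a deferred-event handshake: a hotplug arriving while fbdev is suspended is latched rather than processed, and exactly one replay fires on resume. A self-contained userspace model of that handshake, with pthread_mutex_t standing in for the kernel mutex and handle_hotplug() for drm_fb_helper_hotplug_event(); all names here are illustrative.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t hpd_lock = PTHREAD_MUTEX_INITIALIZER;
static bool hpd_suspended;
static bool hpd_waiting;

static void handle_hotplug(void)
{
        printf("reprobe connectors\n");
}

/*
 * Models intel_fbdev_output_poll_changed(): always latch the event,
 * deliver immediately only when not suspended.
 */
static void poll_changed(void)
{
        bool send_hpd;

        pthread_mutex_lock(&hpd_lock);
        send_hpd = !hpd_suspended;
        hpd_waiting = true;
        pthread_mutex_unlock(&hpd_lock);

        if (send_hpd)
                handle_hotplug();
}

/*
 * Models intel_fbdev_hpd_set_suspend(): on resume, replay a hotplug
 * that was latched while suspended, then clear the latch.
 */
static void set_suspend(bool suspend)
{
        bool send_hpd;

        pthread_mutex_lock(&hpd_lock);
        hpd_suspended = suspend;
        send_hpd = !suspend && hpd_waiting;
        hpd_waiting = false;
        pthread_mutex_unlock(&hpd_lock);

        if (send_hpd)
                handle_hotplug();       /* the delayed fbcon HPD event */
}

int main(void)
{
        set_suspend(true);      /* suspend fbdev HPD processing */
        poll_changed();         /* hotplug arrives: latched, not delivered */
        set_suspend(false);     /* resume: the latched event replays once */
        return 0;
}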
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 4be167dcd209..eab9341a5152 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -303,6 +303,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine) | |||
303 | */ | 303 | */ |
304 | if (!(prio & I915_PRIORITY_NEWCLIENT)) { | 304 | if (!(prio & I915_PRIORITY_NEWCLIENT)) { |
305 | prio |= I915_PRIORITY_NEWCLIENT; | 305 | prio |= I915_PRIORITY_NEWCLIENT; |
306 | active->sched.attr.priority = prio; | ||
306 | list_move_tail(&active->sched.link, | 307 | list_move_tail(&active->sched.link, |
307 | i915_sched_lookup_priolist(engine, prio)); | 308 | i915_sched_lookup_priolist(engine, prio)); |
308 | } | 309 | } |
@@ -645,6 +646,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine) | |||
645 | int i; | 646 | int i; |
646 | 647 | ||
647 | priolist_for_each_request_consume(rq, rn, p, i) { | 648 | priolist_for_each_request_consume(rq, rn, p, i) { |
649 | GEM_BUG_ON(last && | ||
650 | need_preempt(engine, last, rq_prio(rq))); | ||
651 | |||
648 | /* | 652 | /* |
649 | * Can we combine this request with the current port? | 653 | * Can we combine this request with the current port? |
650 | * It has to be the same context/ringbuffer and not | 654 | * It has to be the same context/ringbuffer and not |
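The GEM_BUG_ON added above asserts a queue invariant: execlists_dequeue() only walks the priolists when the head of the queue did not warrant preempting the last submitted request, and because requests are consumed in priority order, nothing behind the head may warrant it either. A toy model of that assertion, reducing need_preempt() to a bare priority comparison (the real check also folds in effective-priority adjustments):

#include <assert.h>

/*
 * prio[] is walked in descending order, mirroring the priolist walk;
 * if the head was not allowed to preempt last_prio, nothing behind it
 * may be either. assert() stands in for GEM_BUG_ON().
 */
static void consume_queue(const int *prio, int n, int last_prio)
{
        for (int i = 0; i < n; i++) {
                assert(!(prio[i] > last_prio)); /* i.e. !need_preempt() */
                /* ... build the port submission from request i ... */
        }
}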
@@ -2244,6 +2248,8 @@ static int logical_ring_init(struct intel_engine_cs *engine) | |||
2244 | if (ret) | 2248 | if (ret) |
2245 | return ret; | 2249 | return ret; |
2246 | 2250 | ||
2251 | intel_engine_init_workarounds(engine); | ||
2252 | |||
2247 | if (HAS_LOGICAL_RING_ELSQ(i915)) { | 2253 | if (HAS_LOGICAL_RING_ELSQ(i915)) { |
2248 | execlists->submit_reg = i915->regs + | 2254 | execlists->submit_reg = i915->regs + |
2249 | i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine)); | 2255 | i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine)); |
@@ -2310,7 +2316,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine) | |||
2310 | } | 2316 | } |
2311 | 2317 | ||
2312 | intel_engine_init_whitelist(engine); | 2318 | intel_engine_init_whitelist(engine); |
2313 | intel_engine_init_workarounds(engine); | ||
2314 | 2319 | ||
2315 | return 0; | 2320 | return 0; |
2316 | } | 2321 | } |
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index b8f106d9ecf8..3ac20153705a 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -55,7 +55,12 @@ | |||
55 | struct opregion_header { | 55 | struct opregion_header { |
56 | u8 signature[16]; | 56 | u8 signature[16]; |
57 | u32 size; | 57 | u32 size; |
58 | u32 opregion_ver; | 58 | struct { |
59 | u8 rsvd; | ||
60 | u8 revision; | ||
61 | u8 minor; | ||
62 | u8 major; | ||
63 | } __packed over; | ||
59 | u8 bios_ver[32]; | 64 | u8 bios_ver[32]; |
60 | u8 vbios_ver[16]; | 65 | u8 vbios_ver[16]; |
61 | u8 driver_ver[16]; | 66 | u8 driver_ver[16]; |
@@ -119,7 +124,8 @@ struct opregion_asle { | |||
119 | u64 fdss; | 124 | u64 fdss; |
120 | u32 fdsp; | 125 | u32 fdsp; |
121 | u32 stat; | 126 | u32 stat; |
122 | u64 rvda; /* Physical address of raw vbt data */ | 127 | u64 rvda; /* Physical (2.0) or relative from opregion (2.1+) |
128 | * address of raw VBT data. */ | ||
123 | u32 rvds; /* Size of raw vbt data */ | 129 | u32 rvds; /* Size of raw vbt data */ |
124 | u8 rsvd[58]; | 130 | u8 rsvd[58]; |
125 | } __packed; | 131 | } __packed; |
@@ -925,6 +931,11 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) | |||
925 | opregion->header = base; | 931 | opregion->header = base; |
926 | opregion->lid_state = base + ACPI_CLID; | 932 | opregion->lid_state = base + ACPI_CLID; |
927 | 933 | ||
934 | DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n", | ||
935 | opregion->header->over.major, | ||
936 | opregion->header->over.minor, | ||
937 | opregion->header->over.revision); | ||
938 | |||
928 | mboxes = opregion->header->mboxes; | 939 | mboxes = opregion->header->mboxes; |
929 | if (mboxes & MBOX_ACPI) { | 940 | if (mboxes & MBOX_ACPI) { |
930 | DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); | 941 | DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); |
@@ -953,11 +964,26 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) | |||
953 | if (dmi_check_system(intel_no_opregion_vbt)) | 964 | if (dmi_check_system(intel_no_opregion_vbt)) |
954 | goto out; | 965 | goto out; |
955 | 966 | ||
956 | if (opregion->header->opregion_ver >= 2 && opregion->asle && | 967 | if (opregion->header->over.major >= 2 && opregion->asle && |
957 | opregion->asle->rvda && opregion->asle->rvds) { | 968 | opregion->asle->rvda && opregion->asle->rvds) { |
958 | opregion->rvda = memremap(opregion->asle->rvda, | 969 | resource_size_t rvda = opregion->asle->rvda; |
959 | opregion->asle->rvds, | 970 | |
971 | /* | ||
972 | * opregion 2.0: rvda is the physical VBT address. | ||
973 | * | ||
974 | * opregion 2.1+: rvda is unsigned, relative offset from | ||
975 | * opregion base, and should never point within opregion. | ||
976 | */ | ||
977 | if (opregion->header->over.major > 2 || | ||
978 | opregion->header->over.minor >= 1) { | ||
979 | WARN_ON(rvda < OPREGION_SIZE); | ||
980 | |||
981 | rvda += asls; | ||
982 | } | ||
983 | |||
984 | opregion->rvda = memremap(rvda, opregion->asle->rvds, | ||
960 | MEMREMAP_WB); | 985 | MEMREMAP_WB); |
986 | |||
961 | vbt = opregion->rvda; | 987 | vbt = opregion->rvda; |
962 | vbt_size = opregion->asle->rvds; | 988 | vbt_size = opregion->asle->rvds; |
963 | if (intel_bios_is_valid_vbt(vbt, vbt_size)) { | 989 | if (intel_bios_is_valid_vbt(vbt, vbt_size)) { |
@@ -967,6 +993,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) | |||
967 | goto out; | 993 | goto out; |
968 | } else { | 994 | } else { |
969 | DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n"); | 995 | DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n"); |
996 | memunmap(opregion->rvda); | ||
997 | opregion->rvda = NULL; | ||
970 | } | 998 | } |
971 | } | 999 | } |
972 | 1000 | ||
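The version-dependent remap above distinguishes two RVDA encodings: OpRegion 2.0 publishes an absolute physical address, while 2.1+ publishes an unsigned offset from the OpRegion base (asls) that must never land inside the 8 KiB OpRegion itself; the error path now also unmaps an invalid VBT instead of leaking the mapping. A standalone sketch of the address computation under those rules:

#include <stdint.h>
#include <assert.h>

#define OPREGION_SIZE (8 * 1024)

/*
 * Models the version split for arbitrary inputs; the driver's shorter
 * "major > 2 || minor >= 1" test works because that path is only
 * reached with major >= 2. assert() stands in for the kernel WARN_ON().
 */
static uint64_t rvda_to_phys(uint8_t major, uint8_t minor,
                             uint64_t rvda, uint64_t asls)
{
        if (major > 2 || (major == 2 && minor >= 1)) {
                /* 2.1+: offset from the OpRegion base, never inside it */
                assert(rvda >= OPREGION_SIZE);
                return asls + rvda;
        }

        return rvda;    /* 2.0: already a physical address */
}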
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 419e56342523..f71970df9936 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c | |||
@@ -274,10 +274,16 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) | |||
274 | DRM_DEBUG_KMS("eDP panel supports PSR version %x\n", | 274 | DRM_DEBUG_KMS("eDP panel supports PSR version %x\n", |
275 | intel_dp->psr_dpcd[0]); | 275 | intel_dp->psr_dpcd[0]); |
276 | 276 | ||
277 | if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) { | ||
278 | DRM_DEBUG_KMS("PSR support not currently available for this panel\n"); | ||
279 | return; | ||
280 | } | ||
281 | |||
277 | if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { | 282 | if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) { |
278 | DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n"); | 283 | DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n"); |
279 | return; | 284 | return; |
280 | } | 285 | } |
286 | |||
281 | dev_priv->psr.sink_support = true; | 287 | dev_priv->psr.sink_support = true; |
282 | dev_priv->psr.sink_sync_latency = | 288 | dev_priv->psr.sink_sync_latency = |
283 | intel_dp_get_sink_sync_latency(intel_dp); | 289 | intel_dp_get_sink_sync_latency(intel_dp); |
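The PSR hunk inserts a quirk veto ahead of the capability checks: a panel on the DP_DPCD_QUIRK_NO_PSR list advertises PSR it cannot deliver, so it must be rejected before any of its DPCD bits are trusted. A minimal sketch of that gating order; the two flags summarize the quirk lookup and the DP_EDP_SET_POWER_CAP check.

#include <stdbool.h>

static bool psr_usable(bool no_psr_quirk, bool has_set_power_cap)
{
        if (no_psr_quirk)
                return false;   /* panel advertises PSR it cannot deliver */

        if (!has_set_power_cap)
                return false;   /* no sink power-state control */

        return true;
}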
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 72edaa7ff411..a1a7cc29fdd1 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -415,16 +415,17 @@ struct intel_engine_cs { | |||
415 | /** | 415 | /** |
416 | * @enable_count: Reference count for the enabled samplers. | 416 | * @enable_count: Reference count for the enabled samplers. |
417 | * | 417 | * |
418 | * Index number corresponds to the bit number from @enable. | 418 | * Index number corresponds to @enum drm_i915_pmu_engine_sample. |
419 | */ | 419 | */ |
420 | unsigned int enable_count[I915_PMU_SAMPLE_BITS]; | 420 | unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT]; |
421 | /** | 421 | /** |
422 | * @sample: Counter values for sampling events. | 422 | * @sample: Counter values for sampling events. |
423 | * | 423 | * |
424 | * Our internal timer stores the current counters in this field. | 424 | * Our internal timer stores the current counters in this field. |
425 | * | ||
426 | * Index number corresponds to @enum drm_i915_pmu_engine_sample. | ||
425 | */ | 427 | */ |
426 | #define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1) | 428 | struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT]; |
427 | struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX]; | ||
428 | } pmu; | 429 | } pmu; |
429 | 430 | ||
430 | /* | 431 | /* |
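The ringbuffer.h change retires the locally derived I915_ENGINE_SAMPLE_MAX in favour of a count supplied by the sample enum itself, so enable_count[] and sample[] are sized by a single authority and cannot drift apart when a sample type is added. A sketch of the idiom with illustrative names (the real enum is drm_i915_pmu_engine_sample):

enum engine_sample {
        SAMPLE_BUSY,
        SAMPLE_WAIT,
        SAMPLE_SEMA,
        ENGINE_SAMPLE_COUNT     /* must stay last */
};

static unsigned int enable_count[ENGINE_SAMPLE_COUNT];
static unsigned long sample[ENGINE_SAMPLE_COUNT];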
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index d2e003d8f3db..5170a0f5fe7b 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -494,7 +494,7 @@ skl_program_plane(struct intel_plane *plane, | |||
494 | 494 | ||
495 | keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); | 495 | keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); |
496 | 496 | ||
497 | keymsk = key->channel_mask & 0x3ffffff; | 497 | keymsk = key->channel_mask & 0x7ffffff; |
498 | if (alpha < 0xff) | 498 | if (alpha < 0xff) |
499 | keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; | 499 | keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; |
500 | 500 | ||