diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2016-01-12 05:01:12 -0500 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2016-01-12 05:01:12 -0500 |
commit | 1f16f116b01c110db20ab808562c8b8bc3ee3d6e (patch) | |
tree | 44db563f64cf5f8d62af8f99a61e2b248c44ea3a /drivers/gpu | |
parent | 03724ac3d48f8f0e3caf1d30fa134f8fd96c94e2 (diff) | |
parent | f9eccf24615672896dc13251410c3f2f33a14f95 (diff) |
Merge branches 'clockevents/4.4-fixes' and 'clockevents/4.5-fixes' of http://git.linaro.org/people/daniel.lezcano/linux into timers/urgent
Pull in fixes from Daniel Lezcano:
- Fix the vt8500 timer leading to a system lock-up when dealing with a too-small
delta (Roman Volkov)
- Select the CLKSRC_MMIO when the fsl_ftm_timer is enabled with COMPILE_TEST
(Daniel Lezcano)
- Prevent compiling timers that use the 'iomem' API when the architecture does
not have HAS_IOMEM set (Richard Weinberger)
Diffstat (limited to 'drivers/gpu')
98 files changed, 2140 insertions, 1510 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 306f75700bf8..048cfe073dae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -496,6 +496,7 @@ struct amdgpu_bo_va_mapping { | |||
496 | 496 | ||
497 | /* bo virtual addresses in a specific vm */ | 497 | /* bo virtual addresses in a specific vm */ |
498 | struct amdgpu_bo_va { | 498 | struct amdgpu_bo_va { |
499 | struct mutex mutex; | ||
499 | /* protected by bo being reserved */ | 500 | /* protected by bo being reserved */ |
500 | struct list_head bo_list; | 501 | struct list_head bo_list; |
501 | struct fence *last_pt_update; | 502 | struct fence *last_pt_update; |
@@ -538,6 +539,7 @@ struct amdgpu_bo { | |||
538 | /* Constant after initialization */ | 539 | /* Constant after initialization */ |
539 | struct amdgpu_device *adev; | 540 | struct amdgpu_device *adev; |
540 | struct drm_gem_object gem_base; | 541 | struct drm_gem_object gem_base; |
542 | struct amdgpu_bo *parent; | ||
541 | 543 | ||
542 | struct ttm_bo_kmap_obj dma_buf_vmap; | 544 | struct ttm_bo_kmap_obj dma_buf_vmap; |
543 | pid_t pid; | 545 | pid_t pid; |
@@ -928,8 +930,6 @@ struct amdgpu_vm_id { | |||
928 | }; | 930 | }; |
929 | 931 | ||
930 | struct amdgpu_vm { | 932 | struct amdgpu_vm { |
931 | struct mutex mutex; | ||
932 | |||
933 | struct rb_root va; | 933 | struct rb_root va; |
934 | 934 | ||
935 | /* protecting invalidated */ | 935 | /* protecting invalidated */ |
@@ -956,6 +956,8 @@ struct amdgpu_vm { | |||
956 | struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; | 956 | struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; |
957 | /* for interval tree */ | 957 | /* for interval tree */ |
958 | spinlock_t it_lock; | 958 | spinlock_t it_lock; |
959 | /* protecting freed */ | ||
960 | spinlock_t freed_lock; | ||
959 | }; | 961 | }; |
960 | 962 | ||
961 | struct amdgpu_vm_manager { | 963 | struct amdgpu_vm_manager { |
@@ -1262,7 +1264,8 @@ struct amdgpu_cs_parser { | |||
1262 | struct ww_acquire_ctx ticket; | 1264 | struct ww_acquire_ctx ticket; |
1263 | 1265 | ||
1264 | /* user fence */ | 1266 | /* user fence */ |
1265 | struct amdgpu_user_fence uf; | 1267 | struct amdgpu_user_fence uf; |
1268 | struct amdgpu_bo_list_entry uf_entry; | ||
1266 | }; | 1269 | }; |
1267 | 1270 | ||
1268 | struct amdgpu_job { | 1271 | struct amdgpu_job { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 3afcf0237c25..25a3e2485cc2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
@@ -127,6 +127,37 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, | |||
127 | return 0; | 127 | return 0; |
128 | } | 128 | } |
129 | 129 | ||
130 | static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, | ||
131 | struct drm_amdgpu_cs_chunk_fence *fence_data) | ||
132 | { | ||
133 | struct drm_gem_object *gobj; | ||
134 | uint32_t handle; | ||
135 | |||
136 | handle = fence_data->handle; | ||
137 | gobj = drm_gem_object_lookup(p->adev->ddev, p->filp, | ||
138 | fence_data->handle); | ||
139 | if (gobj == NULL) | ||
140 | return -EINVAL; | ||
141 | |||
142 | p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); | ||
143 | p->uf.offset = fence_data->offset; | ||
144 | |||
145 | if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) { | ||
146 | drm_gem_object_unreference_unlocked(gobj); | ||
147 | return -EINVAL; | ||
148 | } | ||
149 | |||
150 | p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo); | ||
151 | p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT; | ||
152 | p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT; | ||
153 | p->uf_entry.priority = 0; | ||
154 | p->uf_entry.tv.bo = &p->uf_entry.robj->tbo; | ||
155 | p->uf_entry.tv.shared = true; | ||
156 | |||
157 | drm_gem_object_unreference_unlocked(gobj); | ||
158 | return 0; | ||
159 | } | ||
160 | |||
130 | int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) | 161 | int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) |
131 | { | 162 | { |
132 | union drm_amdgpu_cs *cs = data; | 163 | union drm_amdgpu_cs *cs = data; |
@@ -207,26 +238,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) | |||
207 | 238 | ||
208 | case AMDGPU_CHUNK_ID_FENCE: | 239 | case AMDGPU_CHUNK_ID_FENCE: |
209 | size = sizeof(struct drm_amdgpu_cs_chunk_fence); | 240 | size = sizeof(struct drm_amdgpu_cs_chunk_fence); |
210 | if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) { | 241 | if (p->chunks[i].length_dw * sizeof(uint32_t) < size) { |
211 | uint32_t handle; | ||
212 | struct drm_gem_object *gobj; | ||
213 | struct drm_amdgpu_cs_chunk_fence *fence_data; | ||
214 | |||
215 | fence_data = (void *)p->chunks[i].kdata; | ||
216 | handle = fence_data->handle; | ||
217 | gobj = drm_gem_object_lookup(p->adev->ddev, | ||
218 | p->filp, handle); | ||
219 | if (gobj == NULL) { | ||
220 | ret = -EINVAL; | ||
221 | goto free_partial_kdata; | ||
222 | } | ||
223 | |||
224 | p->uf.bo = gem_to_amdgpu_bo(gobj); | ||
225 | p->uf.offset = fence_data->offset; | ||
226 | } else { | ||
227 | ret = -EINVAL; | 242 | ret = -EINVAL; |
228 | goto free_partial_kdata; | 243 | goto free_partial_kdata; |
229 | } | 244 | } |
245 | |||
246 | ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata); | ||
247 | if (ret) | ||
248 | goto free_partial_kdata; | ||
249 | |||
230 | break; | 250 | break; |
231 | 251 | ||
232 | case AMDGPU_CHUNK_ID_DEPENDENCIES: | 252 | case AMDGPU_CHUNK_ID_DEPENDENCIES: |
@@ -389,6 +409,9 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p) | |||
389 | p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm, | 409 | p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm, |
390 | &p->validated); | 410 | &p->validated); |
391 | 411 | ||
412 | if (p->uf.bo) | ||
413 | list_add(&p->uf_entry.tv.head, &p->validated); | ||
414 | |||
392 | if (need_mmap_lock) | 415 | if (need_mmap_lock) |
393 | down_read(¤t->mm->mmap_sem); | 416 | down_read(¤t->mm->mmap_sem); |
394 | 417 | ||
@@ -486,8 +509,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo | |||
486 | for (i = 0; i < parser->num_ibs; i++) | 509 | for (i = 0; i < parser->num_ibs; i++) |
487 | amdgpu_ib_free(parser->adev, &parser->ibs[i]); | 510 | amdgpu_ib_free(parser->adev, &parser->ibs[i]); |
488 | kfree(parser->ibs); | 511 | kfree(parser->ibs); |
489 | if (parser->uf.bo) | 512 | amdgpu_bo_unref(&parser->uf.bo); |
490 | drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); | 513 | amdgpu_bo_unref(&parser->uf_entry.robj); |
491 | } | 514 | } |
492 | 515 | ||
493 | static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, | 516 | static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, |
@@ -776,7 +799,7 @@ static int amdgpu_cs_free_job(struct amdgpu_job *job) | |||
776 | amdgpu_ib_free(job->adev, &job->ibs[i]); | 799 | amdgpu_ib_free(job->adev, &job->ibs[i]); |
777 | kfree(job->ibs); | 800 | kfree(job->ibs); |
778 | if (job->uf.bo) | 801 | if (job->uf.bo) |
779 | drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base); | 802 | amdgpu_bo_unref(&job->uf.bo); |
780 | return 0; | 803 | return 0; |
781 | } | 804 | } |
782 | 805 | ||
@@ -784,8 +807,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
784 | { | 807 | { |
785 | struct amdgpu_device *adev = dev->dev_private; | 808 | struct amdgpu_device *adev = dev->dev_private; |
786 | union drm_amdgpu_cs *cs = data; | 809 | union drm_amdgpu_cs *cs = data; |
787 | struct amdgpu_fpriv *fpriv = filp->driver_priv; | ||
788 | struct amdgpu_vm *vm = &fpriv->vm; | ||
789 | struct amdgpu_cs_parser parser = {}; | 810 | struct amdgpu_cs_parser parser = {}; |
790 | bool reserved_buffers = false; | 811 | bool reserved_buffers = false; |
791 | int i, r; | 812 | int i, r; |
@@ -803,7 +824,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
803 | r = amdgpu_cs_handle_lockup(adev, r); | 824 | r = amdgpu_cs_handle_lockup(adev, r); |
804 | return r; | 825 | return r; |
805 | } | 826 | } |
806 | mutex_lock(&vm->mutex); | ||
807 | r = amdgpu_cs_parser_relocs(&parser); | 827 | r = amdgpu_cs_parser_relocs(&parser); |
808 | if (r == -ENOMEM) | 828 | if (r == -ENOMEM) |
809 | DRM_ERROR("Not enough memory for command submission!\n"); | 829 | DRM_ERROR("Not enough memory for command submission!\n"); |
@@ -888,7 +908,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
888 | 908 | ||
889 | out: | 909 | out: |
890 | amdgpu_cs_parser_fini(&parser, r, reserved_buffers); | 910 | amdgpu_cs_parser_fini(&parser, r, reserved_buffers); |
891 | mutex_unlock(&vm->mutex); | ||
892 | r = amdgpu_cs_handle_lockup(adev, r); | 911 | r = amdgpu_cs_handle_lockup(adev, r); |
893 | return r; | 912 | return r; |
894 | } | 913 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index e173a5a02f0d..5580d3420c3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | |||
@@ -73,6 +73,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work) | |||
73 | struct drm_crtc *crtc = &amdgpuCrtc->base; | 73 | struct drm_crtc *crtc = &amdgpuCrtc->base; |
74 | unsigned long flags; | 74 | unsigned long flags; |
75 | unsigned i; | 75 | unsigned i; |
76 | int vpos, hpos, stat, min_udelay; | ||
77 | struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; | ||
76 | 78 | ||
77 | amdgpu_flip_wait_fence(adev, &work->excl); | 79 | amdgpu_flip_wait_fence(adev, &work->excl); |
78 | for (i = 0; i < work->shared_count; ++i) | 80 | for (i = 0; i < work->shared_count; ++i) |
@@ -81,6 +83,41 @@ static void amdgpu_flip_work_func(struct work_struct *__work) | |||
81 | /* We borrow the event spin lock for protecting flip_status */ | 83 | /* We borrow the event spin lock for protecting flip_status */ |
82 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | 84 | spin_lock_irqsave(&crtc->dev->event_lock, flags); |
83 | 85 | ||
86 | /* If this happens to execute within the "virtually extended" vblank | ||
87 | * interval before the start of the real vblank interval then it needs | ||
88 | * to delay programming the mmio flip until the real vblank is entered. | ||
89 | * This prevents completing a flip too early due to the way we fudge | ||
90 | * our vblank counter and vblank timestamps in order to work around the | ||
91 | * problem that the hw fires vblank interrupts before actual start of | ||
92 | * vblank (when line buffer refilling is done for a frame). It | ||
93 | * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for | ||
94 | * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts. | ||
95 | * | ||
96 | * In practice this won't execute very often unless on very fast | ||
97 | * machines because the time window for this to happen is very small. | ||
98 | */ | ||
99 | for (;;) { | ||
100 | /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank | ||
101 | * start in hpos, and to the "fudged earlier" vblank start in | ||
102 | * vpos. | ||
103 | */ | ||
104 | stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, | ||
105 | GET_DISTANCE_TO_VBLANKSTART, | ||
106 | &vpos, &hpos, NULL, NULL, | ||
107 | &crtc->hwmode); | ||
108 | |||
109 | if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != | ||
110 | (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) || | ||
111 | !(vpos >= 0 && hpos <= 0)) | ||
112 | break; | ||
113 | |||
114 | /* Sleep at least until estimated real start of hw vblank */ | ||
115 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | ||
116 | min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); | ||
117 | usleep_range(min_udelay, 2 * min_udelay); | ||
118 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | ||
119 | }; | ||
120 | |||
84 | /* do the flip (mmio) */ | 121 | /* do the flip (mmio) */ |
85 | adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); | 122 | adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); |
86 | /* set the flip status */ | 123 | /* set the flip status */ |
@@ -109,7 +146,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work) | |||
109 | } else | 146 | } else |
110 | DRM_ERROR("failed to reserve buffer after flip\n"); | 147 | DRM_ERROR("failed to reserve buffer after flip\n"); |
111 | 148 | ||
112 | drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); | 149 | amdgpu_bo_unref(&work->old_rbo); |
113 | kfree(work->shared); | 150 | kfree(work->shared); |
114 | kfree(work); | 151 | kfree(work); |
115 | } | 152 | } |
@@ -148,8 +185,8 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, | |||
148 | obj = old_amdgpu_fb->obj; | 185 | obj = old_amdgpu_fb->obj; |
149 | 186 | ||
150 | /* take a reference to the old object */ | 187 | /* take a reference to the old object */ |
151 | drm_gem_object_reference(obj); | ||
152 | work->old_rbo = gem_to_amdgpu_bo(obj); | 188 | work->old_rbo = gem_to_amdgpu_bo(obj); |
189 | amdgpu_bo_ref(work->old_rbo); | ||
153 | 190 | ||
154 | new_amdgpu_fb = to_amdgpu_framebuffer(fb); | 191 | new_amdgpu_fb = to_amdgpu_framebuffer(fb); |
155 | obj = new_amdgpu_fb->obj; | 192 | obj = new_amdgpu_fb->obj; |
@@ -222,7 +259,7 @@ pflip_cleanup: | |||
222 | amdgpu_bo_unreserve(new_rbo); | 259 | amdgpu_bo_unreserve(new_rbo); |
223 | 260 | ||
224 | cleanup: | 261 | cleanup: |
225 | drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); | 262 | amdgpu_bo_unref(&work->old_rbo); |
226 | fence_put(work->excl); | 263 | fence_put(work->excl); |
227 | for (i = 0; i < work->shared_count; ++i) | 264 | for (i = 0; i < work->shared_count; ++i) |
228 | fence_put(work->shared[i]); | 265 | fence_put(work->shared[i]); |
@@ -712,6 +749,15 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
712 | * \param dev Device to query. | 749 | * \param dev Device to query. |
713 | * \param pipe Crtc to query. | 750 | * \param pipe Crtc to query. |
714 | * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). | 751 | * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). |
752 | * For driver internal use only also supports these flags: | ||
753 | * | ||
754 | * USE_REAL_VBLANKSTART to use the real start of vblank instead | ||
755 | * of a fudged earlier start of vblank. | ||
756 | * | ||
757 | * GET_DISTANCE_TO_VBLANKSTART to return distance to the | ||
758 | * fudged earlier start of vblank in *vpos and the distance | ||
759 | * to true start of vblank in *hpos. | ||
760 | * | ||
715 | * \param *vpos Location where vertical scanout position should be stored. | 761 | * \param *vpos Location where vertical scanout position should be stored. |
716 | * \param *hpos Location where horizontal scanout position should go. | 762 | * \param *hpos Location where horizontal scanout position should go. |
717 | * \param *stime Target location for timestamp taken immediately before | 763 | * \param *stime Target location for timestamp taken immediately before |
@@ -776,10 +822,40 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, | |||
776 | vbl_end = 0; | 822 | vbl_end = 0; |
777 | } | 823 | } |
778 | 824 | ||
825 | /* Called from driver internal vblank counter query code? */ | ||
826 | if (flags & GET_DISTANCE_TO_VBLANKSTART) { | ||
827 | /* Caller wants distance from real vbl_start in *hpos */ | ||
828 | *hpos = *vpos - vbl_start; | ||
829 | } | ||
830 | |||
831 | /* Fudge vblank to start a few scanlines earlier to handle the | ||
832 | * problem that vblank irqs fire a few scanlines before start | ||
833 | * of vblank. Some driver internal callers need the true vblank | ||
834 | * start to be used and signal this via the USE_REAL_VBLANKSTART flag. | ||
835 | * | ||
836 | * The cause of the "early" vblank irq is that the irq is triggered | ||
837 | * by the line buffer logic when the line buffer read position enters | ||
838 | * the vblank, whereas our crtc scanout position naturally lags the | ||
839 | * line buffer read position. | ||
840 | */ | ||
841 | if (!(flags & USE_REAL_VBLANKSTART)) | ||
842 | vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines; | ||
843 | |||
779 | /* Test scanout position against vblank region. */ | 844 | /* Test scanout position against vblank region. */ |
780 | if ((*vpos < vbl_start) && (*vpos >= vbl_end)) | 845 | if ((*vpos < vbl_start) && (*vpos >= vbl_end)) |
781 | in_vbl = false; | 846 | in_vbl = false; |
782 | 847 | ||
848 | /* In vblank? */ | ||
849 | if (in_vbl) | ||
850 | ret |= DRM_SCANOUTPOS_IN_VBLANK; | ||
851 | |||
852 | /* Called from driver internal vblank counter query code? */ | ||
853 | if (flags & GET_DISTANCE_TO_VBLANKSTART) { | ||
854 | /* Caller wants distance from fudged earlier vbl_start */ | ||
855 | *vpos -= vbl_start; | ||
856 | return ret; | ||
857 | } | ||
858 | |||
783 | /* Check if inside vblank area and apply corrective offsets: | 859 | /* Check if inside vblank area and apply corrective offsets: |
784 | * vpos will then be >=0 in video scanout area, but negative | 860 | * vpos will then be >=0 in video scanout area, but negative |
785 | * within vblank area, counting down the number of lines until | 861 | * within vblank area, counting down the number of lines until |
@@ -795,32 +871,6 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, | |||
795 | /* Correct for shifted end of vbl at vbl_end. */ | 871 | /* Correct for shifted end of vbl at vbl_end. */ |
796 | *vpos = *vpos - vbl_end; | 872 | *vpos = *vpos - vbl_end; |
797 | 873 | ||
798 | /* In vblank? */ | ||
799 | if (in_vbl) | ||
800 | ret |= DRM_SCANOUTPOS_IN_VBLANK; | ||
801 | |||
802 | /* Is vpos outside nominal vblank area, but less than | ||
803 | * 1/100 of a frame height away from start of vblank? | ||
804 | * If so, assume this isn't a massively delayed vblank | ||
805 | * interrupt, but a vblank interrupt that fired a few | ||
806 | * microseconds before true start of vblank. Compensate | ||
807 | * by adding a full frame duration to the final timestamp. | ||
808 | * Happens, e.g., on ATI R500, R600. | ||
809 | * | ||
810 | * We only do this if DRM_CALLED_FROM_VBLIRQ. | ||
811 | */ | ||
812 | if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) { | ||
813 | vbl_start = mode->crtc_vdisplay; | ||
814 | vtotal = mode->crtc_vtotal; | ||
815 | |||
816 | if (vbl_start - *vpos < vtotal / 100) { | ||
817 | *vpos -= vtotal; | ||
818 | |||
819 | /* Signal this correction as "applied". */ | ||
820 | ret |= 0x8; | ||
821 | } | ||
822 | } | ||
823 | |||
824 | return ret; | 874 | return ret; |
825 | } | 875 | } |
826 | 876 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 00c5b580f56c..9c253c535d26 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |||
@@ -115,12 +115,9 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri | |||
115 | struct amdgpu_vm *vm = &fpriv->vm; | 115 | struct amdgpu_vm *vm = &fpriv->vm; |
116 | struct amdgpu_bo_va *bo_va; | 116 | struct amdgpu_bo_va *bo_va; |
117 | int r; | 117 | int r; |
118 | mutex_lock(&vm->mutex); | ||
119 | r = amdgpu_bo_reserve(rbo, false); | 118 | r = amdgpu_bo_reserve(rbo, false); |
120 | if (r) { | 119 | if (r) |
121 | mutex_unlock(&vm->mutex); | ||
122 | return r; | 120 | return r; |
123 | } | ||
124 | 121 | ||
125 | bo_va = amdgpu_vm_bo_find(vm, rbo); | 122 | bo_va = amdgpu_vm_bo_find(vm, rbo); |
126 | if (!bo_va) { | 123 | if (!bo_va) { |
@@ -129,7 +126,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri | |||
129 | ++bo_va->ref_count; | 126 | ++bo_va->ref_count; |
130 | } | 127 | } |
131 | amdgpu_bo_unreserve(rbo); | 128 | amdgpu_bo_unreserve(rbo); |
132 | mutex_unlock(&vm->mutex); | ||
133 | return 0; | 129 | return 0; |
134 | } | 130 | } |
135 | 131 | ||
@@ -142,10 +138,8 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, | |||
142 | struct amdgpu_vm *vm = &fpriv->vm; | 138 | struct amdgpu_vm *vm = &fpriv->vm; |
143 | struct amdgpu_bo_va *bo_va; | 139 | struct amdgpu_bo_va *bo_va; |
144 | int r; | 140 | int r; |
145 | mutex_lock(&vm->mutex); | ||
146 | r = amdgpu_bo_reserve(rbo, true); | 141 | r = amdgpu_bo_reserve(rbo, true); |
147 | if (r) { | 142 | if (r) { |
148 | mutex_unlock(&vm->mutex); | ||
149 | dev_err(adev->dev, "leaking bo va because " | 143 | dev_err(adev->dev, "leaking bo va because " |
150 | "we fail to reserve bo (%d)\n", r); | 144 | "we fail to reserve bo (%d)\n", r); |
151 | return; | 145 | return; |
@@ -157,7 +151,6 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, | |||
157 | } | 151 | } |
158 | } | 152 | } |
159 | amdgpu_bo_unreserve(rbo); | 153 | amdgpu_bo_unreserve(rbo); |
160 | mutex_unlock(&vm->mutex); | ||
161 | } | 154 | } |
162 | 155 | ||
163 | static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r) | 156 | static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r) |
@@ -242,8 +235,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, | |||
242 | AMDGPU_GEM_USERPTR_REGISTER)) | 235 | AMDGPU_GEM_USERPTR_REGISTER)) |
243 | return -EINVAL; | 236 | return -EINVAL; |
244 | 237 | ||
245 | if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) || | 238 | if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && ( |
246 | !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) { | 239 | !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) || |
240 | !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) { | ||
247 | 241 | ||
248 | /* if we want to write to it we must require anonymous | 242 | /* if we want to write to it we must require anonymous |
249 | memory and install a MMU notifier */ | 243 | memory and install a MMU notifier */ |
@@ -483,6 +477,14 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, | |||
483 | if (domain == AMDGPU_GEM_DOMAIN_CPU) | 477 | if (domain == AMDGPU_GEM_DOMAIN_CPU) |
484 | goto error_unreserve; | 478 | goto error_unreserve; |
485 | } | 479 | } |
480 | list_for_each_entry(entry, &duplicates, head) { | ||
481 | domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); | ||
482 | /* if anything is swapped out don't swap it in here, | ||
483 | just abort and wait for the next CS */ | ||
484 | if (domain == AMDGPU_GEM_DOMAIN_CPU) | ||
485 | goto error_unreserve; | ||
486 | } | ||
487 | |||
486 | r = amdgpu_vm_update_page_directory(adev, bo_va->vm); | 488 | r = amdgpu_vm_update_page_directory(adev, bo_va->vm); |
487 | if (r) | 489 | if (r) |
488 | goto error_unreserve; | 490 | goto error_unreserve; |
@@ -553,7 +555,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
553 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | 555 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
554 | if (gobj == NULL) | 556 | if (gobj == NULL) |
555 | return -ENOENT; | 557 | return -ENOENT; |
556 | mutex_lock(&fpriv->vm.mutex); | ||
557 | rbo = gem_to_amdgpu_bo(gobj); | 558 | rbo = gem_to_amdgpu_bo(gobj); |
558 | INIT_LIST_HEAD(&list); | 559 | INIT_LIST_HEAD(&list); |
559 | INIT_LIST_HEAD(&duplicates); | 560 | INIT_LIST_HEAD(&duplicates); |
@@ -568,7 +569,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
568 | } | 569 | } |
569 | r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); | 570 | r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); |
570 | if (r) { | 571 | if (r) { |
571 | mutex_unlock(&fpriv->vm.mutex); | ||
572 | drm_gem_object_unreference_unlocked(gobj); | 572 | drm_gem_object_unreference_unlocked(gobj); |
573 | return r; | 573 | return r; |
574 | } | 574 | } |
@@ -577,7 +577,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
577 | if (!bo_va) { | 577 | if (!bo_va) { |
578 | ttm_eu_backoff_reservation(&ticket, &list); | 578 | ttm_eu_backoff_reservation(&ticket, &list); |
579 | drm_gem_object_unreference_unlocked(gobj); | 579 | drm_gem_object_unreference_unlocked(gobj); |
580 | mutex_unlock(&fpriv->vm.mutex); | ||
581 | return -ENOENT; | 580 | return -ENOENT; |
582 | } | 581 | } |
583 | 582 | ||
@@ -602,7 +601,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
602 | ttm_eu_backoff_reservation(&ticket, &list); | 601 | ttm_eu_backoff_reservation(&ticket, &list); |
603 | if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) | 602 | if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) |
604 | amdgpu_gem_va_update_vm(adev, bo_va, args->operation); | 603 | amdgpu_gem_va_update_vm(adev, bo_va, args->operation); |
605 | mutex_unlock(&fpriv->vm.mutex); | 604 | |
606 | drm_gem_object_unreference_unlocked(gobj); | 605 | drm_gem_object_unreference_unlocked(gobj); |
607 | return r; | 606 | return r; |
608 | } | 607 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 1618e2294a16..e23843f4d877 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | |||
@@ -611,13 +611,59 @@ void amdgpu_driver_preclose_kms(struct drm_device *dev, | |||
611 | u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe) | 611 | u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe) |
612 | { | 612 | { |
613 | struct amdgpu_device *adev = dev->dev_private; | 613 | struct amdgpu_device *adev = dev->dev_private; |
614 | int vpos, hpos, stat; | ||
615 | u32 count; | ||
614 | 616 | ||
615 | if (pipe >= adev->mode_info.num_crtc) { | 617 | if (pipe >= adev->mode_info.num_crtc) { |
616 | DRM_ERROR("Invalid crtc %u\n", pipe); | 618 | DRM_ERROR("Invalid crtc %u\n", pipe); |
617 | return -EINVAL; | 619 | return -EINVAL; |
618 | } | 620 | } |
619 | 621 | ||
620 | return amdgpu_display_vblank_get_counter(adev, pipe); | 622 | /* The hw increments its frame counter at start of vsync, not at start |
623 | * of vblank, as is required by DRM core vblank counter handling. | ||
624 | * Cook the hw count here to make it appear to the caller as if it | ||
625 | * incremented at start of vblank. We measure distance to start of | ||
626 | * vblank in vpos. vpos therefore will be >= 0 between start of vblank | ||
627 | * and start of vsync, so vpos >= 0 means to bump the hw frame counter | ||
628 | * result by 1 to give the proper appearance to caller. | ||
629 | */ | ||
630 | if (adev->mode_info.crtcs[pipe]) { | ||
631 | /* Repeat readout if needed to provide stable result if | ||
632 | * we cross start of vsync during the queries. | ||
633 | */ | ||
634 | do { | ||
635 | count = amdgpu_display_vblank_get_counter(adev, pipe); | ||
636 | /* Ask amdgpu_get_crtc_scanoutpos to return vpos as | ||
637 | * distance to start of vblank, instead of regular | ||
638 | * vertical scanout pos. | ||
639 | */ | ||
640 | stat = amdgpu_get_crtc_scanoutpos( | ||
641 | dev, pipe, GET_DISTANCE_TO_VBLANKSTART, | ||
642 | &vpos, &hpos, NULL, NULL, | ||
643 | &adev->mode_info.crtcs[pipe]->base.hwmode); | ||
644 | } while (count != amdgpu_display_vblank_get_counter(adev, pipe)); | ||
645 | |||
646 | if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != | ||
647 | (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) { | ||
648 | DRM_DEBUG_VBL("Query failed! stat %d\n", stat); | ||
649 | } else { | ||
650 | DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n", | ||
651 | pipe, vpos); | ||
652 | |||
653 | /* Bump counter if we are at >= leading edge of vblank, | ||
654 | * but before vsync where vpos would turn negative and | ||
655 | * the hw counter really increments. | ||
656 | */ | ||
657 | if (vpos >= 0) | ||
658 | count++; | ||
659 | } | ||
660 | } else { | ||
661 | /* Fallback to use value as is. */ | ||
662 | count = amdgpu_display_vblank_get_counter(adev, pipe); | ||
663 | DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n"); | ||
664 | } | ||
665 | |||
666 | return count; | ||
621 | } | 667 | } |
622 | 668 | ||
623 | /** | 669 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index b62c1710cab6..064ebb347074 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | |||
@@ -407,6 +407,7 @@ struct amdgpu_crtc { | |||
407 | u32 line_time; | 407 | u32 line_time; |
408 | u32 wm_low; | 408 | u32 wm_low; |
409 | u32 wm_high; | 409 | u32 wm_high; |
410 | u32 lb_vblank_lead_lines; | ||
410 | struct drm_display_mode hw_mode; | 411 | struct drm_display_mode hw_mode; |
411 | }; | 412 | }; |
412 | 413 | ||
@@ -528,6 +529,10 @@ struct amdgpu_framebuffer { | |||
528 | #define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \ | 529 | #define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \ |
529 | ((em) == ATOM_ENCODER_MODE_DP_MST)) | 530 | ((em) == ATOM_ENCODER_MODE_DP_MST)) |
530 | 531 | ||
532 | /* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */ | ||
533 | #define USE_REAL_VBLANKSTART (1 << 30) | ||
534 | #define GET_DISTANCE_TO_VBLANKSTART (1 << 31) | ||
535 | |||
531 | void amdgpu_link_encoder_connector(struct drm_device *dev); | 536 | void amdgpu_link_encoder_connector(struct drm_device *dev); |
532 | 537 | ||
533 | struct drm_connector * | 538 | struct drm_connector * |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 0d524384ff79..c3ce103b6a33 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | |||
@@ -100,6 +100,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) | |||
100 | list_del_init(&bo->list); | 100 | list_del_init(&bo->list); |
101 | mutex_unlock(&bo->adev->gem.mutex); | 101 | mutex_unlock(&bo->adev->gem.mutex); |
102 | drm_gem_object_release(&bo->gem_base); | 102 | drm_gem_object_release(&bo->gem_base); |
103 | amdgpu_bo_unref(&bo->parent); | ||
103 | kfree(bo->metadata); | 104 | kfree(bo->metadata); |
104 | kfree(bo); | 105 | kfree(bo); |
105 | } | 106 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index d4bac5f49939..8a1752ff3d8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -587,9 +587,13 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, | |||
587 | uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); | 587 | uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); |
588 | int r; | 588 | int r; |
589 | 589 | ||
590 | if (gtt->userptr) | 590 | if (gtt->userptr) { |
591 | amdgpu_ttm_tt_pin_userptr(ttm); | 591 | r = amdgpu_ttm_tt_pin_userptr(ttm); |
592 | 592 | if (r) { | |
593 | DRM_ERROR("failed to pin userptr\n"); | ||
594 | return r; | ||
595 | } | ||
596 | } | ||
593 | gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); | 597 | gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); |
594 | if (!ttm->num_pages) { | 598 | if (!ttm->num_pages) { |
595 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", | 599 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", |
@@ -797,11 +801,12 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, | |||
797 | if (mem && mem->mem_type != TTM_PL_SYSTEM) | 801 | if (mem && mem->mem_type != TTM_PL_SYSTEM) |
798 | flags |= AMDGPU_PTE_VALID; | 802 | flags |= AMDGPU_PTE_VALID; |
799 | 803 | ||
800 | if (mem && mem->mem_type == TTM_PL_TT) | 804 | if (mem && mem->mem_type == TTM_PL_TT) { |
801 | flags |= AMDGPU_PTE_SYSTEM; | 805 | flags |= AMDGPU_PTE_SYSTEM; |
802 | 806 | ||
803 | if (!ttm || ttm->caching_state == tt_cached) | 807 | if (ttm->caching_state == tt_cached) |
804 | flags |= AMDGPU_PTE_SNOOPED; | 808 | flags |= AMDGPU_PTE_SNOOPED; |
809 | } | ||
805 | 810 | ||
806 | if (adev->asic_type >= CHIP_TOPAZ) | 811 | if (adev->asic_type >= CHIP_TOPAZ) |
807 | flags |= AMDGPU_PTE_EXECUTABLE; | 812 | flags |= AMDGPU_PTE_EXECUTABLE; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 03f0c3bae516..a745eeeb5d82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | |||
@@ -392,7 +392,10 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
392 | ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ | 392 | ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ |
393 | ib->ptr[ib->length_dw++] = handle; | 393 | ib->ptr[ib->length_dw++] = handle; |
394 | 394 | ||
395 | ib->ptr[ib->length_dw++] = 0x00000030; /* len */ | 395 | if ((ring->adev->vce.fw_version >> 24) >= 52) |
396 | ib->ptr[ib->length_dw++] = 0x00000040; /* len */ | ||
397 | else | ||
398 | ib->ptr[ib->length_dw++] = 0x00000030; /* len */ | ||
396 | ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */ | 399 | ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */ |
397 | ib->ptr[ib->length_dw++] = 0x00000000; | 400 | ib->ptr[ib->length_dw++] = 0x00000000; |
398 | ib->ptr[ib->length_dw++] = 0x00000042; | 401 | ib->ptr[ib->length_dw++] = 0x00000042; |
@@ -404,6 +407,12 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
404 | ib->ptr[ib->length_dw++] = 0x00000100; | 407 | ib->ptr[ib->length_dw++] = 0x00000100; |
405 | ib->ptr[ib->length_dw++] = 0x0000000c; | 408 | ib->ptr[ib->length_dw++] = 0x0000000c; |
406 | ib->ptr[ib->length_dw++] = 0x00000000; | 409 | ib->ptr[ib->length_dw++] = 0x00000000; |
410 | if ((ring->adev->vce.fw_version >> 24) >= 52) { | ||
411 | ib->ptr[ib->length_dw++] = 0x00000000; | ||
412 | ib->ptr[ib->length_dw++] = 0x00000000; | ||
413 | ib->ptr[ib->length_dw++] = 0x00000000; | ||
414 | ib->ptr[ib->length_dw++] = 0x00000000; | ||
415 | } | ||
407 | 416 | ||
408 | ib->ptr[ib->length_dw++] = 0x00000014; /* len */ | 417 | ib->ptr[ib->length_dw++] = 0x00000014; /* len */ |
409 | ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ | 418 | ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 159ce54bbd8d..b53d273eb7a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -885,17 +885,21 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, | |||
885 | struct amdgpu_bo_va_mapping *mapping; | 885 | struct amdgpu_bo_va_mapping *mapping; |
886 | int r; | 886 | int r; |
887 | 887 | ||
888 | spin_lock(&vm->freed_lock); | ||
888 | while (!list_empty(&vm->freed)) { | 889 | while (!list_empty(&vm->freed)) { |
889 | mapping = list_first_entry(&vm->freed, | 890 | mapping = list_first_entry(&vm->freed, |
890 | struct amdgpu_bo_va_mapping, list); | 891 | struct amdgpu_bo_va_mapping, list); |
891 | list_del(&mapping->list); | 892 | list_del(&mapping->list); |
892 | 893 | spin_unlock(&vm->freed_lock); | |
893 | r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL); | 894 | r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL); |
894 | kfree(mapping); | 895 | kfree(mapping); |
895 | if (r) | 896 | if (r) |
896 | return r; | 897 | return r; |
897 | 898 | ||
899 | spin_lock(&vm->freed_lock); | ||
898 | } | 900 | } |
901 | spin_unlock(&vm->freed_lock); | ||
902 | |||
899 | return 0; | 903 | return 0; |
900 | 904 | ||
901 | } | 905 | } |
@@ -922,8 +926,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, | |||
922 | bo_va = list_first_entry(&vm->invalidated, | 926 | bo_va = list_first_entry(&vm->invalidated, |
923 | struct amdgpu_bo_va, vm_status); | 927 | struct amdgpu_bo_va, vm_status); |
924 | spin_unlock(&vm->status_lock); | 928 | spin_unlock(&vm->status_lock); |
925 | 929 | mutex_lock(&bo_va->mutex); | |
926 | r = amdgpu_vm_bo_update(adev, bo_va, NULL); | 930 | r = amdgpu_vm_bo_update(adev, bo_va, NULL); |
931 | mutex_unlock(&bo_va->mutex); | ||
927 | if (r) | 932 | if (r) |
928 | return r; | 933 | return r; |
929 | 934 | ||
@@ -967,7 +972,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, | |||
967 | INIT_LIST_HEAD(&bo_va->valids); | 972 | INIT_LIST_HEAD(&bo_va->valids); |
968 | INIT_LIST_HEAD(&bo_va->invalids); | 973 | INIT_LIST_HEAD(&bo_va->invalids); |
969 | INIT_LIST_HEAD(&bo_va->vm_status); | 974 | INIT_LIST_HEAD(&bo_va->vm_status); |
970 | 975 | mutex_init(&bo_va->mutex); | |
971 | list_add_tail(&bo_va->bo_list, &bo->va); | 976 | list_add_tail(&bo_va->bo_list, &bo->va); |
972 | 977 | ||
973 | return bo_va; | 978 | return bo_va; |
@@ -1045,7 +1050,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
1045 | mapping->offset = offset; | 1050 | mapping->offset = offset; |
1046 | mapping->flags = flags; | 1051 | mapping->flags = flags; |
1047 | 1052 | ||
1053 | mutex_lock(&bo_va->mutex); | ||
1048 | list_add(&mapping->list, &bo_va->invalids); | 1054 | list_add(&mapping->list, &bo_va->invalids); |
1055 | mutex_unlock(&bo_va->mutex); | ||
1049 | spin_lock(&vm->it_lock); | 1056 | spin_lock(&vm->it_lock); |
1050 | interval_tree_insert(&mapping->it, &vm->va); | 1057 | interval_tree_insert(&mapping->it, &vm->va); |
1051 | spin_unlock(&vm->it_lock); | 1058 | spin_unlock(&vm->it_lock); |
@@ -1076,6 +1083,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
1076 | if (r) | 1083 | if (r) |
1077 | goto error_free; | 1084 | goto error_free; |
1078 | 1085 | ||
1086 | /* Keep a reference to the page table to avoid freeing | ||
1087 | * them up in the wrong order. | ||
1088 | */ | ||
1089 | pt->parent = amdgpu_bo_ref(vm->page_directory); | ||
1090 | |||
1079 | r = amdgpu_vm_clear_bo(adev, pt); | 1091 | r = amdgpu_vm_clear_bo(adev, pt); |
1080 | if (r) { | 1092 | if (r) { |
1081 | amdgpu_bo_unref(&pt); | 1093 | amdgpu_bo_unref(&pt); |
@@ -1121,7 +1133,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | |||
1121 | bool valid = true; | 1133 | bool valid = true; |
1122 | 1134 | ||
1123 | saddr /= AMDGPU_GPU_PAGE_SIZE; | 1135 | saddr /= AMDGPU_GPU_PAGE_SIZE; |
1124 | 1136 | mutex_lock(&bo_va->mutex); | |
1125 | list_for_each_entry(mapping, &bo_va->valids, list) { | 1137 | list_for_each_entry(mapping, &bo_va->valids, list) { |
1126 | if (mapping->it.start == saddr) | 1138 | if (mapping->it.start == saddr) |
1127 | break; | 1139 | break; |
@@ -1135,20 +1147,25 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | |||
1135 | break; | 1147 | break; |
1136 | } | 1148 | } |
1137 | 1149 | ||
1138 | if (&mapping->list == &bo_va->invalids) | 1150 | if (&mapping->list == &bo_va->invalids) { |
1151 | mutex_unlock(&bo_va->mutex); | ||
1139 | return -ENOENT; | 1152 | return -ENOENT; |
1153 | } | ||
1140 | } | 1154 | } |
1141 | 1155 | mutex_unlock(&bo_va->mutex); | |
1142 | list_del(&mapping->list); | 1156 | list_del(&mapping->list); |
1143 | spin_lock(&vm->it_lock); | 1157 | spin_lock(&vm->it_lock); |
1144 | interval_tree_remove(&mapping->it, &vm->va); | 1158 | interval_tree_remove(&mapping->it, &vm->va); |
1145 | spin_unlock(&vm->it_lock); | 1159 | spin_unlock(&vm->it_lock); |
1146 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); | 1160 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
1147 | 1161 | ||
1148 | if (valid) | 1162 | if (valid) { |
1163 | spin_lock(&vm->freed_lock); | ||
1149 | list_add(&mapping->list, &vm->freed); | 1164 | list_add(&mapping->list, &vm->freed); |
1150 | else | 1165 | spin_unlock(&vm->freed_lock); |
1166 | } else { | ||
1151 | kfree(mapping); | 1167 | kfree(mapping); |
1168 | } | ||
1152 | 1169 | ||
1153 | return 0; | 1170 | return 0; |
1154 | } | 1171 | } |
@@ -1181,7 +1198,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | |||
1181 | interval_tree_remove(&mapping->it, &vm->va); | 1198 | interval_tree_remove(&mapping->it, &vm->va); |
1182 | spin_unlock(&vm->it_lock); | 1199 | spin_unlock(&vm->it_lock); |
1183 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); | 1200 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
1201 | spin_lock(&vm->freed_lock); | ||
1184 | list_add(&mapping->list, &vm->freed); | 1202 | list_add(&mapping->list, &vm->freed); |
1203 | spin_unlock(&vm->freed_lock); | ||
1185 | } | 1204 | } |
1186 | list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { | 1205 | list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { |
1187 | list_del(&mapping->list); | 1206 | list_del(&mapping->list); |
@@ -1190,8 +1209,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | |||
1190 | spin_unlock(&vm->it_lock); | 1209 | spin_unlock(&vm->it_lock); |
1191 | kfree(mapping); | 1210 | kfree(mapping); |
1192 | } | 1211 | } |
1193 | |||
1194 | fence_put(bo_va->last_pt_update); | 1212 | fence_put(bo_va->last_pt_update); |
1213 | mutex_destroy(&bo_va->mutex); | ||
1195 | kfree(bo_va); | 1214 | kfree(bo_va); |
1196 | } | 1215 | } |
1197 | 1216 | ||
@@ -1236,13 +1255,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
1236 | vm->ids[i].id = 0; | 1255 | vm->ids[i].id = 0; |
1237 | vm->ids[i].flushed_updates = NULL; | 1256 | vm->ids[i].flushed_updates = NULL; |
1238 | } | 1257 | } |
1239 | mutex_init(&vm->mutex); | ||
1240 | vm->va = RB_ROOT; | 1258 | vm->va = RB_ROOT; |
1241 | spin_lock_init(&vm->status_lock); | 1259 | spin_lock_init(&vm->status_lock); |
1242 | INIT_LIST_HEAD(&vm->invalidated); | 1260 | INIT_LIST_HEAD(&vm->invalidated); |
1243 | INIT_LIST_HEAD(&vm->cleared); | 1261 | INIT_LIST_HEAD(&vm->cleared); |
1244 | INIT_LIST_HEAD(&vm->freed); | 1262 | INIT_LIST_HEAD(&vm->freed); |
1245 | spin_lock_init(&vm->it_lock); | 1263 | spin_lock_init(&vm->it_lock); |
1264 | spin_lock_init(&vm->freed_lock); | ||
1246 | pd_size = amdgpu_vm_directory_size(adev); | 1265 | pd_size = amdgpu_vm_directory_size(adev); |
1247 | pd_entries = amdgpu_vm_num_pdes(adev); | 1266 | pd_entries = amdgpu_vm_num_pdes(adev); |
1248 | 1267 | ||
@@ -1320,7 +1339,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
1320 | fence_put(vm->ids[i].flushed_updates); | 1339 | fence_put(vm->ids[i].flushed_updates); |
1321 | } | 1340 | } |
1322 | 1341 | ||
1323 | mutex_destroy(&vm->mutex); | ||
1324 | } | 1342 | } |
1325 | 1343 | ||
1326 | /** | 1344 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index cb0f7747e3dc..4dcc8fba5792 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | |||
@@ -1250,7 +1250,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, | |||
1250 | u32 pixel_period; | 1250 | u32 pixel_period; |
1251 | u32 line_time = 0; | 1251 | u32 line_time = 0; |
1252 | u32 latency_watermark_a = 0, latency_watermark_b = 0; | 1252 | u32 latency_watermark_a = 0, latency_watermark_b = 0; |
1253 | u32 tmp, wm_mask; | 1253 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; |
1254 | 1254 | ||
1255 | if (amdgpu_crtc->base.enabled && num_heads && mode) { | 1255 | if (amdgpu_crtc->base.enabled && num_heads && mode) { |
1256 | pixel_period = 1000000 / (u32)mode->clock; | 1256 | pixel_period = 1000000 / (u32)mode->clock; |
@@ -1333,6 +1333,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, | |||
1333 | (adev->mode_info.disp_priority == 2)) { | 1333 | (adev->mode_info.disp_priority == 2)) { |
1334 | DRM_DEBUG_KMS("force priority to high\n"); | 1334 | DRM_DEBUG_KMS("force priority to high\n"); |
1335 | } | 1335 | } |
1336 | lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); | ||
1336 | } | 1337 | } |
1337 | 1338 | ||
1338 | /* select wm A */ | 1339 | /* select wm A */ |
@@ -1357,6 +1358,8 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, | |||
1357 | amdgpu_crtc->line_time = line_time; | 1358 | amdgpu_crtc->line_time = line_time; |
1358 | amdgpu_crtc->wm_high = latency_watermark_a; | 1359 | amdgpu_crtc->wm_high = latency_watermark_a; |
1359 | amdgpu_crtc->wm_low = latency_watermark_b; | 1360 | amdgpu_crtc->wm_low = latency_watermark_b; |
1361 | /* Save number of lines the linebuffer leads before the scanout */ | ||
1362 | amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; | ||
1360 | } | 1363 | } |
1361 | 1364 | ||
1362 | /** | 1365 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 5af3721851d6..8f1e51128b33 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | |||
@@ -1238,7 +1238,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, | |||
1238 | u32 pixel_period; | 1238 | u32 pixel_period; |
1239 | u32 line_time = 0; | 1239 | u32 line_time = 0; |
1240 | u32 latency_watermark_a = 0, latency_watermark_b = 0; | 1240 | u32 latency_watermark_a = 0, latency_watermark_b = 0; |
1241 | u32 tmp, wm_mask; | 1241 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; |
1242 | 1242 | ||
1243 | if (amdgpu_crtc->base.enabled && num_heads && mode) { | 1243 | if (amdgpu_crtc->base.enabled && num_heads && mode) { |
1244 | pixel_period = 1000000 / (u32)mode->clock; | 1244 | pixel_period = 1000000 / (u32)mode->clock; |
@@ -1321,6 +1321,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, | |||
1321 | (adev->mode_info.disp_priority == 2)) { | 1321 | (adev->mode_info.disp_priority == 2)) { |
1322 | DRM_DEBUG_KMS("force priority to high\n"); | 1322 | DRM_DEBUG_KMS("force priority to high\n"); |
1323 | } | 1323 | } |
1324 | lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); | ||
1324 | } | 1325 | } |
1325 | 1326 | ||
1326 | /* select wm A */ | 1327 | /* select wm A */ |
@@ -1345,6 +1346,8 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, | |||
1345 | amdgpu_crtc->line_time = line_time; | 1346 | amdgpu_crtc->line_time = line_time; |
1346 | amdgpu_crtc->wm_high = latency_watermark_a; | 1347 | amdgpu_crtc->wm_high = latency_watermark_a; |
1347 | amdgpu_crtc->wm_low = latency_watermark_b; | 1348 | amdgpu_crtc->wm_low = latency_watermark_b; |
1349 | /* Save number of lines the linebuffer leads before the scanout */ | ||
1350 | amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; | ||
1348 | } | 1351 | } |
1349 | 1352 | ||
1350 | /** | 1353 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 4f7b49a6dc50..42d954dc436d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | |||
@@ -1193,7 +1193,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, | |||
1193 | u32 pixel_period; | 1193 | u32 pixel_period; |
1194 | u32 line_time = 0; | 1194 | u32 line_time = 0; |
1195 | u32 latency_watermark_a = 0, latency_watermark_b = 0; | 1195 | u32 latency_watermark_a = 0, latency_watermark_b = 0; |
1196 | u32 tmp, wm_mask; | 1196 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; |
1197 | 1197 | ||
1198 | if (amdgpu_crtc->base.enabled && num_heads && mode) { | 1198 | if (amdgpu_crtc->base.enabled && num_heads && mode) { |
1199 | pixel_period = 1000000 / (u32)mode->clock; | 1199 | pixel_period = 1000000 / (u32)mode->clock; |
@@ -1276,6 +1276,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, | |||
1276 | (adev->mode_info.disp_priority == 2)) { | 1276 | (adev->mode_info.disp_priority == 2)) { |
1277 | DRM_DEBUG_KMS("force priority to high\n"); | 1277 | DRM_DEBUG_KMS("force priority to high\n"); |
1278 | } | 1278 | } |
1279 | lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); | ||
1279 | } | 1280 | } |
1280 | 1281 | ||
1281 | /* select wm A */ | 1282 | /* select wm A */ |
@@ -1302,6 +1303,8 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, | |||
1302 | amdgpu_crtc->line_time = line_time; | 1303 | amdgpu_crtc->line_time = line_time; |
1303 | amdgpu_crtc->wm_high = latency_watermark_a; | 1304 | amdgpu_crtc->wm_high = latency_watermark_a; |
1304 | amdgpu_crtc->wm_low = latency_watermark_b; | 1305 | amdgpu_crtc->wm_low = latency_watermark_b; |
1306 | /* Save number of lines the linebuffer leads before the scanout */ | ||
1307 | amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; | ||
1305 | } | 1308 | } |
1306 | 1309 | ||
1307 | /** | 1310 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 7427d8cd4c43..ed8abb58a785 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | |||
@@ -513,7 +513,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) | |||
513 | WREG32(mmVM_L2_CNTL3, tmp); | 513 | WREG32(mmVM_L2_CNTL3, tmp); |
514 | /* setup context0 */ | 514 | /* setup context0 */ |
515 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); | 515 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); |
516 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1); | 516 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); |
517 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); | 517 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); |
518 | WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, | 518 | WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, |
519 | (u32)(adev->dummy_page.addr >> 12)); | 519 | (u32)(adev->dummy_page.addr >> 12)); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index cb0e50ebb528..d39028440814 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | |||
@@ -657,7 +657,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) | |||
657 | WREG32(mmVM_L2_CNTL4, tmp); | 657 | WREG32(mmVM_L2_CNTL4, tmp); |
658 | /* setup context0 */ | 658 | /* setup context0 */ |
659 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); | 659 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); |
660 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1); | 660 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); |
661 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); | 661 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); |
662 | WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, | 662 | WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, |
663 | (u32)(adev->dummy_page.addr >> 12)); | 663 | (u32)(adev->dummy_page.addr >> 12)); |
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 6a52db6ad8d7..370c6c9d81c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | |||
@@ -40,6 +40,9 @@ | |||
40 | 40 | ||
41 | #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 | 41 | #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 |
42 | #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 | 42 | #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 |
43 | #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 | ||
44 | #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 | ||
45 | #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 | ||
43 | 46 | ||
44 | #define VCE_V3_0_FW_SIZE (384 * 1024) | 47 | #define VCE_V3_0_FW_SIZE (384 * 1024) |
45 | #define VCE_V3_0_STACK_SIZE (64 * 1024) | 48 | #define VCE_V3_0_STACK_SIZE (64 * 1024) |
@@ -130,9 +133,11 @@ static int vce_v3_0_start(struct amdgpu_device *adev) | |||
130 | 133 | ||
131 | /* set BUSY flag */ | 134 | /* set BUSY flag */ |
132 | WREG32_P(mmVCE_STATUS, 1, ~1); | 135 | WREG32_P(mmVCE_STATUS, 1, ~1); |
133 | 136 | if (adev->asic_type >= CHIP_STONEY) | |
134 | WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, | 137 | WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001); |
135 | ~VCE_VCPU_CNTL__CLK_EN_MASK); | 138 | else |
139 | WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, | ||
140 | ~VCE_VCPU_CNTL__CLK_EN_MASK); | ||
136 | 141 | ||
137 | WREG32_P(mmVCE_SOFT_RESET, | 142 | WREG32_P(mmVCE_SOFT_RESET, |
138 | VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, | 143 | VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, |
@@ -391,8 +396,12 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) | |||
391 | WREG32(mmVCE_LMI_SWAP_CNTL, 0); | 396 | WREG32(mmVCE_LMI_SWAP_CNTL, 0); |
392 | WREG32(mmVCE_LMI_SWAP_CNTL1, 0); | 397 | WREG32(mmVCE_LMI_SWAP_CNTL1, 0); |
393 | WREG32(mmVCE_LMI_VM_CTRL, 0); | 398 | WREG32(mmVCE_LMI_VM_CTRL, 0); |
394 | 399 | if (adev->asic_type >= CHIP_STONEY) { | |
395 | WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8)); | 400 | WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8)); |
401 | WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8)); | ||
402 | WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR2, (adev->vce.gpu_addr >> 8)); | ||
403 | } else | ||
404 | WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8)); | ||
396 | offset = AMDGPU_VCE_FIRMWARE_OFFSET; | 405 | offset = AMDGPU_VCE_FIRMWARE_OFFSET; |
397 | size = VCE_V3_0_FW_SIZE; | 406 | size = VCE_V3_0_FW_SIZE; |
398 | WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff); | 407 | WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff); |
@@ -576,6 +585,11 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev, | |||
576 | struct amdgpu_iv_entry *entry) | 585 | struct amdgpu_iv_entry *entry) |
577 | { | 586 | { |
578 | DRM_DEBUG("IH: VCE\n"); | 587 | DRM_DEBUG("IH: VCE\n"); |
588 | |||
589 | WREG32_P(mmVCE_SYS_INT_STATUS, | ||
590 | VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK, | ||
591 | ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK); | ||
592 | |||
579 | switch (entry->src_data) { | 593 | switch (entry->src_data) { |
580 | case 0: | 594 | case 0: |
581 | amdgpu_fence_process(&adev->vce.ring[0]); | 595 | amdgpu_fence_process(&adev->vce.ring[0]); |
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index ea30d6ad4c13..3a4820e863ec 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | |||
@@ -30,8 +30,7 @@ | |||
30 | #define CREATE_TRACE_POINTS | 30 | #define CREATE_TRACE_POINTS |
31 | #include "gpu_sched_trace.h" | 31 | #include "gpu_sched_trace.h" |
32 | 32 | ||
33 | static struct amd_sched_job * | 33 | static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); |
34 | amd_sched_entity_pop_job(struct amd_sched_entity *entity); | ||
35 | static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); | 34 | static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); |
36 | 35 | ||
37 | struct kmem_cache *sched_fence_slab; | 36 | struct kmem_cache *sched_fence_slab; |
@@ -64,36 +63,36 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq, | |||
64 | } | 63 | } |
65 | 64 | ||
66 | /** | 65 | /** |
67 | * Select next job from a specified run queue with round robin policy. | 66 | * Select an entity which could provide a job to run |
68 | * Return NULL if nothing available. | 67 | * |
68 | * @rq The run queue to check. | ||
69 | * | ||
70 | * Try to find a ready entity, returns NULL if none found. | ||
69 | */ | 71 | */ |
70 | static struct amd_sched_job * | 72 | static struct amd_sched_entity * |
71 | amd_sched_rq_select_job(struct amd_sched_rq *rq) | 73 | amd_sched_rq_select_entity(struct amd_sched_rq *rq) |
72 | { | 74 | { |
73 | struct amd_sched_entity *entity; | 75 | struct amd_sched_entity *entity; |
74 | struct amd_sched_job *sched_job; | ||
75 | 76 | ||
76 | spin_lock(&rq->lock); | 77 | spin_lock(&rq->lock); |
77 | 78 | ||
78 | entity = rq->current_entity; | 79 | entity = rq->current_entity; |
79 | if (entity) { | 80 | if (entity) { |
80 | list_for_each_entry_continue(entity, &rq->entities, list) { | 81 | list_for_each_entry_continue(entity, &rq->entities, list) { |
81 | sched_job = amd_sched_entity_pop_job(entity); | 82 | if (amd_sched_entity_is_ready(entity)) { |
82 | if (sched_job) { | ||
83 | rq->current_entity = entity; | 83 | rq->current_entity = entity; |
84 | spin_unlock(&rq->lock); | 84 | spin_unlock(&rq->lock); |
85 | return sched_job; | 85 | return entity; |
86 | } | 86 | } |
87 | } | 87 | } |
88 | } | 88 | } |
89 | 89 | ||
90 | list_for_each_entry(entity, &rq->entities, list) { | 90 | list_for_each_entry(entity, &rq->entities, list) { |
91 | 91 | ||
92 | sched_job = amd_sched_entity_pop_job(entity); | 92 | if (amd_sched_entity_is_ready(entity)) { |
93 | if (sched_job) { | ||
94 | rq->current_entity = entity; | 93 | rq->current_entity = entity; |
95 | spin_unlock(&rq->lock); | 94 | spin_unlock(&rq->lock); |
96 | return sched_job; | 95 | return entity; |
97 | } | 96 | } |
98 | 97 | ||
99 | if (entity == rq->current_entity) | 98 | if (entity == rq->current_entity) |
@@ -177,6 +176,24 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity) | |||
177 | } | 176 | } |
178 | 177 | ||
179 | /** | 178 | /** |
179 | * Check if entity is ready | ||
180 | * | ||
181 | * @entity The pointer to a valid scheduler entity | ||
182 | * | ||
183 | * Return true if entity could provide a job. | ||
184 | */ | ||
185 | static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity) | ||
186 | { | ||
187 | if (kfifo_is_empty(&entity->job_queue)) | ||
188 | return false; | ||
189 | |||
190 | if (ACCESS_ONCE(entity->dependency)) | ||
191 | return false; | ||
192 | |||
193 | return true; | ||
194 | } | ||
195 | |||
196 | /** | ||
180 | * Destroy a context entity | 197 | * Destroy a context entity |
181 | * | 198 | * |
182 | * @sched Pointer to scheduler instance | 199 | * @sched Pointer to scheduler instance |
@@ -211,32 +228,53 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb) | |||
211 | amd_sched_wakeup(entity->sched); | 228 | amd_sched_wakeup(entity->sched); |
212 | } | 229 | } |
213 | 230 | ||
231 | static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) | ||
232 | { | ||
233 | struct amd_gpu_scheduler *sched = entity->sched; | ||
234 | struct fence * fence = entity->dependency; | ||
235 | struct amd_sched_fence *s_fence; | ||
236 | |||
237 | if (fence->context == entity->fence_context) { | ||
238 | /* We can ignore fences from ourself */ | ||
239 | fence_put(entity->dependency); | ||
240 | return false; | ||
241 | } | ||
242 | |||
243 | s_fence = to_amd_sched_fence(fence); | ||
244 | if (s_fence && s_fence->sched == sched) { | ||
245 | /* Fence is from the same scheduler */ | ||
246 | if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) { | ||
247 | /* Ignore it when it is already scheduled */ | ||
248 | fence_put(entity->dependency); | ||
249 | return false; | ||
250 | } | ||
251 | |||
252 | /* Wait for fence to be scheduled */ | ||
253 | entity->cb.func = amd_sched_entity_wakeup; | ||
254 | list_add_tail(&entity->cb.node, &s_fence->scheduled_cb); | ||
255 | return true; | ||
256 | } | ||
257 | |||
258 | if (!fence_add_callback(entity->dependency, &entity->cb, | ||
259 | amd_sched_entity_wakeup)) | ||
260 | return true; | ||
261 | |||
262 | fence_put(entity->dependency); | ||
263 | return false; | ||
264 | } | ||
265 | |||
214 | static struct amd_sched_job * | 266 | static struct amd_sched_job * |
215 | amd_sched_entity_pop_job(struct amd_sched_entity *entity) | 267 | amd_sched_entity_pop_job(struct amd_sched_entity *entity) |
216 | { | 268 | { |
217 | struct amd_gpu_scheduler *sched = entity->sched; | 269 | struct amd_gpu_scheduler *sched = entity->sched; |
218 | struct amd_sched_job *sched_job; | 270 | struct amd_sched_job *sched_job; |
219 | 271 | ||
220 | if (ACCESS_ONCE(entity->dependency)) | ||
221 | return NULL; | ||
222 | |||
223 | if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job))) | 272 | if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job))) |
224 | return NULL; | 273 | return NULL; |
225 | 274 | ||
226 | while ((entity->dependency = sched->ops->dependency(sched_job))) { | 275 | while ((entity->dependency = sched->ops->dependency(sched_job))) |
227 | 276 | if (amd_sched_entity_add_dependency_cb(entity)) | |
228 | if (entity->dependency->context == entity->fence_context) { | ||
229 | /* We can ignore fences from ourself */ | ||
230 | fence_put(entity->dependency); | ||
231 | continue; | ||
232 | } | ||
233 | |||
234 | if (fence_add_callback(entity->dependency, &entity->cb, | ||
235 | amd_sched_entity_wakeup)) | ||
236 | fence_put(entity->dependency); | ||
237 | else | ||
238 | return NULL; | 277 | return NULL; |
239 | } | ||
240 | 278 | ||
241 | return sched_job; | 279 | return sched_job; |
242 | } | 280 | } |
@@ -250,6 +288,7 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity) | |||
250 | */ | 288 | */ |
251 | static bool amd_sched_entity_in(struct amd_sched_job *sched_job) | 289 | static bool amd_sched_entity_in(struct amd_sched_job *sched_job) |
252 | { | 290 | { |
291 | struct amd_gpu_scheduler *sched = sched_job->sched; | ||
253 | struct amd_sched_entity *entity = sched_job->s_entity; | 292 | struct amd_sched_entity *entity = sched_job->s_entity; |
254 | bool added, first = false; | 293 | bool added, first = false; |
255 | 294 | ||
@@ -264,7 +303,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job) | |||
264 | 303 | ||
265 | /* first job wakes up scheduler */ | 304 | /* first job wakes up scheduler */ |
266 | if (first) | 305 | if (first) |
267 | amd_sched_wakeup(sched_job->sched); | 306 | amd_sched_wakeup(sched); |
268 | 307 | ||
269 | return added; | 308 | return added; |
270 | } | 309 | } |
@@ -280,9 +319,9 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job) | |||
280 | { | 319 | { |
281 | struct amd_sched_entity *entity = sched_job->s_entity; | 320 | struct amd_sched_entity *entity = sched_job->s_entity; |
282 | 321 | ||
322 | trace_amd_sched_job(sched_job); | ||
283 | wait_event(entity->sched->job_scheduled, | 323 | wait_event(entity->sched->job_scheduled, |
284 | amd_sched_entity_in(sched_job)); | 324 | amd_sched_entity_in(sched_job)); |
285 | trace_amd_sched_job(sched_job); | ||
286 | } | 325 | } |
287 | 326 | ||
288 | /** | 327 | /** |
@@ -304,22 +343,22 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched) | |||
304 | } | 343 | } |
305 | 344 | ||
306 | /** | 345 | /** |
307 | * Select next to run | 346 | * Select next entity to process |
308 | */ | 347 | */ |
309 | static struct amd_sched_job * | 348 | static struct amd_sched_entity * |
310 | amd_sched_select_job(struct amd_gpu_scheduler *sched) | 349 | amd_sched_select_entity(struct amd_gpu_scheduler *sched) |
311 | { | 350 | { |
312 | struct amd_sched_job *sched_job; | 351 | struct amd_sched_entity *entity; |
313 | 352 | ||
314 | if (!amd_sched_ready(sched)) | 353 | if (!amd_sched_ready(sched)) |
315 | return NULL; | 354 | return NULL; |
316 | 355 | ||
317 | /* Kernel run queue has higher priority than normal run queue*/ | 356 | /* Kernel run queue has higher priority than normal run queue*/ |
318 | sched_job = amd_sched_rq_select_job(&sched->kernel_rq); | 357 | entity = amd_sched_rq_select_entity(&sched->kernel_rq); |
319 | if (sched_job == NULL) | 358 | if (entity == NULL) |
320 | sched_job = amd_sched_rq_select_job(&sched->sched_rq); | 359 | entity = amd_sched_rq_select_entity(&sched->sched_rq); |
321 | 360 | ||
322 | return sched_job; | 361 | return entity; |
323 | } | 362 | } |
324 | 363 | ||
325 | static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) | 364 | static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) |
@@ -381,13 +420,16 @@ static int amd_sched_main(void *param) | |||
381 | unsigned long flags; | 420 | unsigned long flags; |
382 | 421 | ||
383 | wait_event_interruptible(sched->wake_up_worker, | 422 | wait_event_interruptible(sched->wake_up_worker, |
384 | kthread_should_stop() || | 423 | (entity = amd_sched_select_entity(sched)) || |
385 | (sched_job = amd_sched_select_job(sched))); | 424 | kthread_should_stop()); |
386 | 425 | ||
426 | if (!entity) | ||
427 | continue; | ||
428 | |||
429 | sched_job = amd_sched_entity_pop_job(entity); | ||
387 | if (!sched_job) | 430 | if (!sched_job) |
388 | continue; | 431 | continue; |
389 | 432 | ||
390 | entity = sched_job->s_entity; | ||
391 | s_fence = sched_job->s_fence; | 433 | s_fence = sched_job->s_fence; |
392 | 434 | ||
393 | if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { | 435 | if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { |
@@ -400,6 +442,7 @@ static int amd_sched_main(void *param) | |||
400 | 442 | ||
401 | atomic_inc(&sched->hw_rq_count); | 443 | atomic_inc(&sched->hw_rq_count); |
402 | fence = sched->ops->run_job(sched_job); | 444 | fence = sched->ops->run_job(sched_job); |
445 | amd_sched_fence_scheduled(s_fence); | ||
403 | if (fence) { | 446 | if (fence) { |
404 | r = fence_add_callback(fence, &s_fence->cb, | 447 | r = fence_add_callback(fence, &s_fence->cb, |
405 | amd_sched_process_job); | 448 | amd_sched_process_job); |
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 939692b14f4b..a0f0ae53aacd 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | |||
@@ -27,6 +27,8 @@ | |||
27 | #include <linux/kfifo.h> | 27 | #include <linux/kfifo.h> |
28 | #include <linux/fence.h> | 28 | #include <linux/fence.h> |
29 | 29 | ||
30 | #define AMD_SCHED_FENCE_SCHEDULED_BIT FENCE_FLAG_USER_BITS | ||
31 | |||
30 | struct amd_gpu_scheduler; | 32 | struct amd_gpu_scheduler; |
31 | struct amd_sched_rq; | 33 | struct amd_sched_rq; |
32 | 34 | ||
@@ -68,6 +70,7 @@ struct amd_sched_rq { | |||
68 | struct amd_sched_fence { | 70 | struct amd_sched_fence { |
69 | struct fence base; | 71 | struct fence base; |
70 | struct fence_cb cb; | 72 | struct fence_cb cb; |
73 | struct list_head scheduled_cb; | ||
71 | struct amd_gpu_scheduler *sched; | 74 | struct amd_gpu_scheduler *sched; |
72 | spinlock_t lock; | 75 | spinlock_t lock; |
73 | void *owner; | 76 | void *owner; |
@@ -134,7 +137,7 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job); | |||
134 | 137 | ||
135 | struct amd_sched_fence *amd_sched_fence_create( | 138 | struct amd_sched_fence *amd_sched_fence_create( |
136 | struct amd_sched_entity *s_entity, void *owner); | 139 | struct amd_sched_entity *s_entity, void *owner); |
140 | void amd_sched_fence_scheduled(struct amd_sched_fence *fence); | ||
137 | void amd_sched_fence_signal(struct amd_sched_fence *fence); | 141 | void amd_sched_fence_signal(struct amd_sched_fence *fence); |
138 | 142 | ||
139 | |||
140 | #endif | 143 | #endif |
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index 8d2130b9ff05..87c78eecea64 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c | |||
@@ -35,6 +35,8 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity | |||
35 | fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL); | 35 | fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL); |
36 | if (fence == NULL) | 36 | if (fence == NULL) |
37 | return NULL; | 37 | return NULL; |
38 | |||
39 | INIT_LIST_HEAD(&fence->scheduled_cb); | ||
38 | fence->owner = owner; | 40 | fence->owner = owner; |
39 | fence->sched = s_entity->sched; | 41 | fence->sched = s_entity->sched; |
40 | spin_lock_init(&fence->lock); | 42 | spin_lock_init(&fence->lock); |
@@ -55,6 +57,17 @@ void amd_sched_fence_signal(struct amd_sched_fence *fence) | |||
55 | FENCE_TRACE(&fence->base, "was already signaled\n"); | 57 | FENCE_TRACE(&fence->base, "was already signaled\n"); |
56 | } | 58 | } |
57 | 59 | ||
60 | void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence) | ||
61 | { | ||
62 | struct fence_cb *cur, *tmp; | ||
63 | |||
64 | set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags); | ||
65 | list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) { | ||
66 | list_del_init(&cur->node); | ||
67 | cur->func(&s_fence->base, cur); | ||
68 | } | ||
69 | } | ||
70 | |||
58 | static const char *amd_sched_fence_get_driver_name(struct fence *fence) | 71 | static const char *amd_sched_fence_get_driver_name(struct fence *fence) |
59 | { | 72 | { |
60 | return "amd_sched"; | 73 | return "amd_sched"; |
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 9362609df38a..7dd6728dd092 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
@@ -160,6 +160,11 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data, | |||
160 | goto out_unlock; | 160 | goto out_unlock; |
161 | } | 161 | } |
162 | 162 | ||
163 | if (!file_priv->allowed_master) { | ||
164 | ret = drm_new_set_master(dev, file_priv); | ||
165 | goto out_unlock; | ||
166 | } | ||
167 | |||
163 | file_priv->minor->master = drm_master_get(file_priv->master); | 168 | file_priv->minor->master = drm_master_get(file_priv->master); |
164 | file_priv->is_master = 1; | 169 | file_priv->is_master = 1; |
165 | if (dev->driver->master_set) { | 170 | if (dev->driver->master_set) { |
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index c59ce4d0ef75..6b5625e66119 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c | |||
@@ -126,6 +126,60 @@ static int drm_cpu_valid(void) | |||
126 | } | 126 | } |
127 | 127 | ||
128 | /** | 128 | /** |
129 | * drm_new_set_master - Allocate a new master object and become master for the | ||
130 | * associated master realm. | ||
131 | * | ||
132 | * @dev: The associated device. | ||
133 | * @fpriv: File private identifying the client. | ||
134 | * | ||
135 | * This function must be called with dev::struct_mutex held. | ||
136 | * Returns negative error code on failure. Zero on success. | ||
137 | */ | ||
138 | int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) | ||
139 | { | ||
140 | struct drm_master *old_master; | ||
141 | int ret; | ||
142 | |||
143 | lockdep_assert_held_once(&dev->master_mutex); | ||
144 | |||
145 | /* create a new master */ | ||
146 | fpriv->minor->master = drm_master_create(fpriv->minor); | ||
147 | if (!fpriv->minor->master) | ||
148 | return -ENOMEM; | ||
149 | |||
150 | /* take another reference for the copy in the local file priv */ | ||
151 | old_master = fpriv->master; | ||
152 | fpriv->master = drm_master_get(fpriv->minor->master); | ||
153 | |||
154 | if (dev->driver->master_create) { | ||
155 | ret = dev->driver->master_create(dev, fpriv->master); | ||
156 | if (ret) | ||
157 | goto out_err; | ||
158 | } | ||
159 | if (dev->driver->master_set) { | ||
160 | ret = dev->driver->master_set(dev, fpriv, true); | ||
161 | if (ret) | ||
162 | goto out_err; | ||
163 | } | ||
164 | |||
165 | fpriv->is_master = 1; | ||
166 | fpriv->allowed_master = 1; | ||
167 | fpriv->authenticated = 1; | ||
168 | if (old_master) | ||
169 | drm_master_put(&old_master); | ||
170 | |||
171 | return 0; | ||
172 | |||
173 | out_err: | ||
174 | /* drop both references and restore old master on failure */ | ||
175 | drm_master_put(&fpriv->minor->master); | ||
176 | drm_master_put(&fpriv->master); | ||
177 | fpriv->master = old_master; | ||
178 | |||
179 | return ret; | ||
180 | } | ||
181 | |||
182 | /** | ||
129 | * Called whenever a process opens /dev/drm. | 183 | * Called whenever a process opens /dev/drm. |
130 | * | 184 | * |
131 | * \param filp file pointer. | 185 | * \param filp file pointer. |
@@ -189,35 +243,9 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) | |||
189 | mutex_lock(&dev->master_mutex); | 243 | mutex_lock(&dev->master_mutex); |
190 | if (drm_is_primary_client(priv) && !priv->minor->master) { | 244 | if (drm_is_primary_client(priv) && !priv->minor->master) { |
191 | /* create a new master */ | 245 | /* create a new master */ |
192 | priv->minor->master = drm_master_create(priv->minor); | 246 | ret = drm_new_set_master(dev, priv); |
193 | if (!priv->minor->master) { | 247 | if (ret) |
194 | ret = -ENOMEM; | ||
195 | goto out_close; | 248 | goto out_close; |
196 | } | ||
197 | |||
198 | priv->is_master = 1; | ||
199 | /* take another reference for the copy in the local file priv */ | ||
200 | priv->master = drm_master_get(priv->minor->master); | ||
201 | priv->authenticated = 1; | ||
202 | |||
203 | if (dev->driver->master_create) { | ||
204 | ret = dev->driver->master_create(dev, priv->master); | ||
205 | if (ret) { | ||
206 | /* drop both references if this fails */ | ||
207 | drm_master_put(&priv->minor->master); | ||
208 | drm_master_put(&priv->master); | ||
209 | goto out_close; | ||
210 | } | ||
211 | } | ||
212 | if (dev->driver->master_set) { | ||
213 | ret = dev->driver->master_set(dev, priv, true); | ||
214 | if (ret) { | ||
215 | /* drop both references if this fails */ | ||
216 | drm_master_put(&priv->minor->master); | ||
217 | drm_master_put(&priv->master); | ||
218 | goto out_close; | ||
219 | } | ||
220 | } | ||
221 | } else if (drm_is_primary_client(priv)) { | 249 | } else if (drm_is_primary_client(priv)) { |
222 | /* get a reference to the master */ | 250 | /* get a reference to the master */ |
223 | priv->master = drm_master_get(priv->minor->master); | 251 | priv->master = drm_master_get(priv->minor->master); |
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 2151ea551d3b..607f493ae801 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
@@ -980,7 +980,8 @@ static void send_vblank_event(struct drm_device *dev, | |||
980 | struct drm_pending_vblank_event *e, | 980 | struct drm_pending_vblank_event *e, |
981 | unsigned long seq, struct timeval *now) | 981 | unsigned long seq, struct timeval *now) |
982 | { | 982 | { |
983 | WARN_ON_SMP(!spin_is_locked(&dev->event_lock)); | 983 | assert_spin_locked(&dev->event_lock); |
984 | |||
984 | e->event.sequence = seq; | 985 | e->event.sequence = seq; |
985 | e->event.tv_sec = now->tv_sec; | 986 | e->event.tv_sec = now->tv_sec; |
986 | e->event.tv_usec = now->tv_usec; | 987 | e->event.tv_usec = now->tv_usec; |
@@ -993,6 +994,57 @@ static void send_vblank_event(struct drm_device *dev, | |||
993 | } | 994 | } |
994 | 995 | ||
995 | /** | 996 | /** |
997 | * drm_arm_vblank_event - arm vblank event after pageflip | ||
998 | * @dev: DRM device | ||
999 | * @pipe: CRTC index | ||
1000 | * @e: the event to prepare to send | ||
1001 | * | ||
1002 | * A lot of drivers need to generate vblank events for the very next vblank | ||
1003 | * interrupt. For example when the page flip interrupt happens when the page | ||
1004 | * flip gets armed, but not when it actually executes within the next vblank | ||
1005 | * period. This helper function implements exactly the required vblank arming | ||
1006 | * behaviour. | ||
1007 | * | ||
1008 | * Caller must hold event lock. Caller must also hold a vblank reference for | ||
1009 | * the event @e, which will be dropped when the next vblank arrives. | ||
1010 | * | ||
1011 | * This is the legacy version of drm_crtc_arm_vblank_event(). | ||
1012 | */ | ||
1013 | void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe, | ||
1014 | struct drm_pending_vblank_event *e) | ||
1015 | { | ||
1016 | assert_spin_locked(&dev->event_lock); | ||
1017 | |||
1018 | e->pipe = pipe; | ||
1019 | e->event.sequence = drm_vblank_count(dev, pipe); | ||
1020 | list_add_tail(&e->base.link, &dev->vblank_event_list); | ||
1021 | } | ||
1022 | EXPORT_SYMBOL(drm_arm_vblank_event); | ||
1023 | |||
1024 | /** | ||
1025 | * drm_crtc_arm_vblank_event - arm vblank event after pageflip | ||
1026 | * @crtc: the source CRTC of the vblank event | ||
1027 | * @e: the event to send | ||
1028 | * | ||
1029 | * A lot of drivers need to generate vblank events for the very next vblank | ||
1030 | * interrupt. For example when the page flip interrupt happens when the page | ||
1031 | * flip gets armed, but not when it actually executes within the next vblank | ||
1032 | * period. This helper function implements exactly the required vblank arming | ||
1033 | * behaviour. | ||
1034 | * | ||
1035 | * Caller must hold event lock. Caller must also hold a vblank reference for | ||
1036 | * the event @e, which will be dropped when the next vblank arrives. | ||
1037 | * | ||
1038 | * This is the native KMS version of drm_arm_vblank_event(). | ||
1039 | */ | ||
1040 | void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, | ||
1041 | struct drm_pending_vblank_event *e) | ||
1042 | { | ||
1043 | drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e); | ||
1044 | } | ||
1045 | EXPORT_SYMBOL(drm_crtc_arm_vblank_event); | ||
1046 | |||
1047 | /** | ||
996 | * drm_send_vblank_event - helper to send vblank event after pageflip | 1048 | * drm_send_vblank_event - helper to send vblank event after pageflip |
997 | * @dev: DRM device | 1049 | * @dev: DRM device |
998 | * @pipe: CRTC index | 1050 | * @pipe: CRTC index |
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index a18164f2f6d2..f8b5fcfa91a2 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c | |||
@@ -229,7 +229,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect | |||
229 | mode_flags |= DRM_MODE_FLAG_3D_MASK; | 229 | mode_flags |= DRM_MODE_FLAG_3D_MASK; |
230 | 230 | ||
231 | list_for_each_entry(mode, &connector->modes, head) { | 231 | list_for_each_entry(mode, &connector->modes, head) { |
232 | mode->status = drm_mode_validate_basic(mode); | 232 | if (mode->status == MODE_OK) |
233 | mode->status = drm_mode_validate_basic(mode); | ||
233 | 234 | ||
234 | if (mode->status == MODE_OK) | 235 | if (mode->status == MODE_OK) |
235 | mode->status = drm_mode_validate_size(mode, maxX, maxY); | 236 | mode->status = drm_mode_validate_size(mode, maxX, maxY); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index b3ba27fd9a6b..e69357172ffb 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c | |||
@@ -55,6 +55,9 @@ static int exynos_crtc_atomic_check(struct drm_crtc *crtc, | |||
55 | { | 55 | { |
56 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); | 56 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); |
57 | 57 | ||
58 | if (!state->enable) | ||
59 | return 0; | ||
60 | |||
58 | if (exynos_crtc->ops->atomic_check) | 61 | if (exynos_crtc->ops->atomic_check) |
59 | return exynos_crtc->ops->atomic_check(exynos_crtc, state); | 62 | return exynos_crtc->ops->atomic_check(exynos_crtc, state); |
60 | 63 | ||
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index a3b22bdacd44..8aab974b0564 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -2734,6 +2734,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain) | |||
2734 | return "AUX_C"; | 2734 | return "AUX_C"; |
2735 | case POWER_DOMAIN_AUX_D: | 2735 | case POWER_DOMAIN_AUX_D: |
2736 | return "AUX_D"; | 2736 | return "AUX_D"; |
2737 | case POWER_DOMAIN_GMBUS: | ||
2738 | return "GMBUS"; | ||
2737 | case POWER_DOMAIN_INIT: | 2739 | case POWER_DOMAIN_INIT: |
2738 | return "INIT"; | 2740 | return "INIT"; |
2739 | default: | 2741 | default: |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 95bb27de774f..f4af19a0d569 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -199,6 +199,7 @@ enum intel_display_power_domain { | |||
199 | POWER_DOMAIN_AUX_B, | 199 | POWER_DOMAIN_AUX_B, |
200 | POWER_DOMAIN_AUX_C, | 200 | POWER_DOMAIN_AUX_C, |
201 | POWER_DOMAIN_AUX_D, | 201 | POWER_DOMAIN_AUX_D, |
202 | POWER_DOMAIN_GMBUS, | ||
202 | POWER_DOMAIN_INIT, | 203 | POWER_DOMAIN_INIT, |
203 | 204 | ||
204 | POWER_DOMAIN_NUM, | 205 | POWER_DOMAIN_NUM, |
@@ -2192,8 +2193,17 @@ struct drm_i915_gem_request { | |||
2192 | struct drm_i915_private *i915; | 2193 | struct drm_i915_private *i915; |
2193 | struct intel_engine_cs *ring; | 2194 | struct intel_engine_cs *ring; |
2194 | 2195 | ||
2195 | /** GEM sequence number associated with this request. */ | 2196 | /** GEM sequence number associated with the previous request, |
2196 | uint32_t seqno; | 2197 | * when the HWS breadcrumb is equal to this the GPU is processing |
2198 | * this request. | ||
2199 | */ | ||
2200 | u32 previous_seqno; | ||
2201 | |||
2202 | /** GEM sequence number associated with this request, | ||
2203 | * when the HWS breadcrumb is equal or greater than this the GPU | ||
2204 | * has finished processing this request. | ||
2205 | */ | ||
2206 | u32 seqno; | ||
2197 | 2207 | ||
2198 | /** Position in the ringbuffer of the start of the request */ | 2208 | /** Position in the ringbuffer of the start of the request */ |
2199 | u32 head; | 2209 | u32 head; |
@@ -2838,6 +2848,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, | |||
2838 | 2848 | ||
2839 | int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, | 2849 | int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, |
2840 | u32 flags); | 2850 | u32 flags); |
2851 | void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); | ||
2841 | int __must_check i915_vma_unbind(struct i915_vma *vma); | 2852 | int __must_check i915_vma_unbind(struct i915_vma *vma); |
2842 | /* | 2853 | /* |
2843 | * BEWARE: Do not use the function below unless you can _absolutely_ | 2854 | * BEWARE: Do not use the function below unless you can _absolutely_ |
@@ -2909,15 +2920,17 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) | |||
2909 | return (int32_t)(seq1 - seq2) >= 0; | 2920 | return (int32_t)(seq1 - seq2) >= 0; |
2910 | } | 2921 | } |
2911 | 2922 | ||
2923 | static inline bool i915_gem_request_started(struct drm_i915_gem_request *req, | ||
2924 | bool lazy_coherency) | ||
2925 | { | ||
2926 | u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency); | ||
2927 | return i915_seqno_passed(seqno, req->previous_seqno); | ||
2928 | } | ||
2929 | |||
2912 | static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, | 2930 | static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, |
2913 | bool lazy_coherency) | 2931 | bool lazy_coherency) |
2914 | { | 2932 | { |
2915 | u32 seqno; | 2933 | u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency); |
2916 | |||
2917 | BUG_ON(req == NULL); | ||
2918 | |||
2919 | seqno = req->ring->get_seqno(req->ring, lazy_coherency); | ||
2920 | |||
2921 | return i915_seqno_passed(seqno, req->seqno); | 2934 | return i915_seqno_passed(seqno, req->seqno); |
2922 | } | 2935 | } |
2923 | 2936 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 91bb1fc27420..f56af0aaafde 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1146,23 +1146,74 @@ static bool missed_irq(struct drm_i915_private *dev_priv, | |||
1146 | return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings); | 1146 | return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings); |
1147 | } | 1147 | } |
1148 | 1148 | ||
1149 | static int __i915_spin_request(struct drm_i915_gem_request *req) | 1149 | static unsigned long local_clock_us(unsigned *cpu) |
1150 | { | ||
1151 | unsigned long t; | ||
1152 | |||
1153 | /* Cheaply and approximately convert from nanoseconds to microseconds. | ||
1154 | * The result and subsequent calculations are also defined in the same | ||
1155 | * approximate microseconds units. The principal source of timing | ||
1156 | * error here is from the simple truncation. | ||
1157 | * | ||
1158 | * Note that local_clock() is only defined wrt to the current CPU; | ||
1159 | * the comparisons are no longer valid if we switch CPUs. Instead of | ||
1160 | * blocking preemption for the entire busywait, we can detect the CPU | ||
1161 | * switch and use that as indicator of system load and a reason to | ||
1162 | * stop busywaiting, see busywait_stop(). | ||
1163 | */ | ||
1164 | *cpu = get_cpu(); | ||
1165 | t = local_clock() >> 10; | ||
1166 | put_cpu(); | ||
1167 | |||
1168 | return t; | ||
1169 | } | ||
1170 | |||
1171 | static bool busywait_stop(unsigned long timeout, unsigned cpu) | ||
1172 | { | ||
1173 | unsigned this_cpu; | ||
1174 | |||
1175 | if (time_after(local_clock_us(&this_cpu), timeout)) | ||
1176 | return true; | ||
1177 | |||
1178 | return this_cpu != cpu; | ||
1179 | } | ||
1180 | |||
1181 | static int __i915_spin_request(struct drm_i915_gem_request *req, int state) | ||
1150 | { | 1182 | { |
1151 | unsigned long timeout; | 1183 | unsigned long timeout; |
1184 | unsigned cpu; | ||
1185 | |||
1186 | /* When waiting for high frequency requests, e.g. during synchronous | ||
1187 | * rendering split between the CPU and GPU, the finite amount of time | ||
1188 | * required to set up the irq and wait upon it limits the response | ||
1189 | * rate. By busywaiting on the request completion for a short while we | ||
1190 | * can service the high frequency waits as quick as possible. However, | ||
1191 | * if it is a slow request, we want to sleep as quickly as possible. | ||
1192 | * The tradeoff between waiting and sleeping is roughly the time it | ||
1193 | * takes to sleep on a request, on the order of a microsecond. | ||
1194 | */ | ||
1152 | 1195 | ||
1153 | if (i915_gem_request_get_ring(req)->irq_refcount) | 1196 | if (req->ring->irq_refcount) |
1154 | return -EBUSY; | 1197 | return -EBUSY; |
1155 | 1198 | ||
1156 | timeout = jiffies + 1; | 1199 | /* Only spin if we know the GPU is processing this request */ |
1200 | if (!i915_gem_request_started(req, true)) | ||
1201 | return -EAGAIN; | ||
1202 | |||
1203 | timeout = local_clock_us(&cpu) + 5; | ||
1157 | while (!need_resched()) { | 1204 | while (!need_resched()) { |
1158 | if (i915_gem_request_completed(req, true)) | 1205 | if (i915_gem_request_completed(req, true)) |
1159 | return 0; | 1206 | return 0; |
1160 | 1207 | ||
1161 | if (time_after_eq(jiffies, timeout)) | 1208 | if (signal_pending_state(state, current)) |
1209 | break; | ||
1210 | |||
1211 | if (busywait_stop(timeout, cpu)) | ||
1162 | break; | 1212 | break; |
1163 | 1213 | ||
1164 | cpu_relax_lowlatency(); | 1214 | cpu_relax_lowlatency(); |
1165 | } | 1215 | } |
1216 | |||
1166 | if (i915_gem_request_completed(req, false)) | 1217 | if (i915_gem_request_completed(req, false)) |
1167 | return 0; | 1218 | return 0; |
1168 | 1219 | ||
@@ -1197,6 +1248,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, | |||
1197 | struct drm_i915_private *dev_priv = dev->dev_private; | 1248 | struct drm_i915_private *dev_priv = dev->dev_private; |
1198 | const bool irq_test_in_progress = | 1249 | const bool irq_test_in_progress = |
1199 | ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring); | 1250 | ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring); |
1251 | int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; | ||
1200 | DEFINE_WAIT(wait); | 1252 | DEFINE_WAIT(wait); |
1201 | unsigned long timeout_expire; | 1253 | unsigned long timeout_expire; |
1202 | s64 before, now; | 1254 | s64 before, now; |
@@ -1210,8 +1262,16 @@ int __i915_wait_request(struct drm_i915_gem_request *req, | |||
1210 | if (i915_gem_request_completed(req, true)) | 1262 | if (i915_gem_request_completed(req, true)) |
1211 | return 0; | 1263 | return 0; |
1212 | 1264 | ||
1213 | timeout_expire = timeout ? | 1265 | timeout_expire = 0; |
1214 | jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0; | 1266 | if (timeout) { |
1267 | if (WARN_ON(*timeout < 0)) | ||
1268 | return -EINVAL; | ||
1269 | |||
1270 | if (*timeout == 0) | ||
1271 | return -ETIME; | ||
1272 | |||
1273 | timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout); | ||
1274 | } | ||
1215 | 1275 | ||
1216 | if (INTEL_INFO(dev_priv)->gen >= 6) | 1276 | if (INTEL_INFO(dev_priv)->gen >= 6) |
1217 | gen6_rps_boost(dev_priv, rps, req->emitted_jiffies); | 1277 | gen6_rps_boost(dev_priv, rps, req->emitted_jiffies); |
@@ -1221,7 +1281,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, | |||
1221 | before = ktime_get_raw_ns(); | 1281 | before = ktime_get_raw_ns(); |
1222 | 1282 | ||
1223 | /* Optimistic spin for the next jiffie before touching IRQs */ | 1283 | /* Optimistic spin for the next jiffie before touching IRQs */ |
1224 | ret = __i915_spin_request(req); | 1284 | ret = __i915_spin_request(req, state); |
1225 | if (ret == 0) | 1285 | if (ret == 0) |
1226 | goto out; | 1286 | goto out; |
1227 | 1287 | ||
@@ -1233,8 +1293,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, | |||
1233 | for (;;) { | 1293 | for (;;) { |
1234 | struct timer_list timer; | 1294 | struct timer_list timer; |
1235 | 1295 | ||
1236 | prepare_to_wait(&ring->irq_queue, &wait, | 1296 | prepare_to_wait(&ring->irq_queue, &wait, state); |
1237 | interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); | ||
1238 | 1297 | ||
1239 | /* We need to check whether any gpu reset happened in between | 1298 | /* We need to check whether any gpu reset happened in between |
1240 | * the caller grabbing the seqno and now ... */ | 1299 | * the caller grabbing the seqno and now ... */ |
@@ -1252,7 +1311,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, | |||
1252 | break; | 1311 | break; |
1253 | } | 1312 | } |
1254 | 1313 | ||
1255 | if (interruptible && signal_pending(current)) { | 1314 | if (signal_pending_state(state, current)) { |
1256 | ret = -ERESTARTSYS; | 1315 | ret = -ERESTARTSYS; |
1257 | break; | 1316 | break; |
1258 | } | 1317 | } |
@@ -2546,6 +2605,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, | |||
2546 | request->batch_obj = obj; | 2605 | request->batch_obj = obj; |
2547 | 2606 | ||
2548 | request->emitted_jiffies = jiffies; | 2607 | request->emitted_jiffies = jiffies; |
2608 | request->previous_seqno = ring->last_submitted_seqno; | ||
2549 | ring->last_submitted_seqno = request->seqno; | 2609 | ring->last_submitted_seqno = request->seqno; |
2550 | list_add_tail(&request->list, &ring->request_list); | 2610 | list_add_tail(&request->list, &ring->request_list); |
2551 | 2611 | ||
@@ -4072,6 +4132,29 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags) | |||
4072 | return false; | 4132 | return false; |
4073 | } | 4133 | } |
4074 | 4134 | ||
4135 | void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) | ||
4136 | { | ||
4137 | struct drm_i915_gem_object *obj = vma->obj; | ||
4138 | bool mappable, fenceable; | ||
4139 | u32 fence_size, fence_alignment; | ||
4140 | |||
4141 | fence_size = i915_gem_get_gtt_size(obj->base.dev, | ||
4142 | obj->base.size, | ||
4143 | obj->tiling_mode); | ||
4144 | fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev, | ||
4145 | obj->base.size, | ||
4146 | obj->tiling_mode, | ||
4147 | true); | ||
4148 | |||
4149 | fenceable = (vma->node.size == fence_size && | ||
4150 | (vma->node.start & (fence_alignment - 1)) == 0); | ||
4151 | |||
4152 | mappable = (vma->node.start + fence_size <= | ||
4153 | to_i915(obj->base.dev)->gtt.mappable_end); | ||
4154 | |||
4155 | obj->map_and_fenceable = mappable && fenceable; | ||
4156 | } | ||
4157 | |||
4075 | static int | 4158 | static int |
4076 | i915_gem_object_do_pin(struct drm_i915_gem_object *obj, | 4159 | i915_gem_object_do_pin(struct drm_i915_gem_object *obj, |
4077 | struct i915_address_space *vm, | 4160 | struct i915_address_space *vm, |
@@ -4139,25 +4222,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj, | |||
4139 | 4222 | ||
4140 | if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL && | 4223 | if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL && |
4141 | (bound ^ vma->bound) & GLOBAL_BIND) { | 4224 | (bound ^ vma->bound) & GLOBAL_BIND) { |
4142 | bool mappable, fenceable; | 4225 | __i915_vma_set_map_and_fenceable(vma); |
4143 | u32 fence_size, fence_alignment; | ||
4144 | |||
4145 | fence_size = i915_gem_get_gtt_size(obj->base.dev, | ||
4146 | obj->base.size, | ||
4147 | obj->tiling_mode); | ||
4148 | fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev, | ||
4149 | obj->base.size, | ||
4150 | obj->tiling_mode, | ||
4151 | true); | ||
4152 | |||
4153 | fenceable = (vma->node.size == fence_size && | ||
4154 | (vma->node.start & (fence_alignment - 1)) == 0); | ||
4155 | |||
4156 | mappable = (vma->node.start + fence_size <= | ||
4157 | dev_priv->gtt.mappable_end); | ||
4158 | |||
4159 | obj->map_and_fenceable = mappable && fenceable; | ||
4160 | |||
4161 | WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable); | 4226 | WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable); |
4162 | } | 4227 | } |
4163 | 4228 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 8c688a5f1589..02ceb7a4b481 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
@@ -141,8 +141,6 @@ static void i915_gem_context_clean(struct intel_context *ctx) | |||
141 | if (!ppgtt) | 141 | if (!ppgtt) |
142 | return; | 142 | return; |
143 | 143 | ||
144 | WARN_ON(!list_empty(&ppgtt->base.active_list)); | ||
145 | |||
146 | list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list, | 144 | list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list, |
147 | mm_list) { | 145 | mm_list) { |
148 | if (WARN_ON(__i915_vma_unbind_no_wait(vma))) | 146 | if (WARN_ON(__i915_vma_unbind_no_wait(vma))) |
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c index 40a10b25956c..f010391b87f5 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence.c +++ b/drivers/gpu/drm/i915/i915_gem_fence.c | |||
@@ -642,11 +642,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
642 | } | 642 | } |
643 | 643 | ||
644 | /* check for L-shaped memory aka modified enhanced addressing */ | 644 | /* check for L-shaped memory aka modified enhanced addressing */ |
645 | if (IS_GEN4(dev)) { | 645 | if (IS_GEN4(dev) && |
646 | uint32_t ddc2 = I915_READ(DCC2); | 646 | !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) { |
647 | 647 | swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; | |
648 | if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) | 648 | swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; |
649 | dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES; | ||
650 | } | 649 | } |
651 | 650 | ||
652 | if (dcc == 0xffffffff) { | 651 | if (dcc == 0xffffffff) { |
@@ -675,16 +674,35 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
675 | * matching, which was the case for the swizzling required in | 674 | * matching, which was the case for the swizzling required in |
676 | * the table above, or from the 1-ch value being less than | 675 | * the table above, or from the 1-ch value being less than |
677 | * the minimum size of a rank. | 676 | * the minimum size of a rank. |
677 | * | ||
678 | * Reports indicate that the swizzling actually | ||
679 | * varies depending upon page placement inside the | ||
680 | * channels, i.e. we see swizzled pages where the | ||
681 | * banks of memory are paired and unswizzled on the | ||
682 | * uneven portion, so leave that as unknown. | ||
678 | */ | 683 | */ |
679 | if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) { | 684 | if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) { |
680 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; | ||
681 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; | ||
682 | } else { | ||
683 | swizzle_x = I915_BIT_6_SWIZZLE_9_10; | 685 | swizzle_x = I915_BIT_6_SWIZZLE_9_10; |
684 | swizzle_y = I915_BIT_6_SWIZZLE_9; | 686 | swizzle_y = I915_BIT_6_SWIZZLE_9; |
685 | } | 687 | } |
686 | } | 688 | } |
687 | 689 | ||
690 | if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN || | ||
691 | swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) { | ||
692 | /* Userspace likes to explode if it sees unknown swizzling, | ||
693 | * so lie. We will finish the lie when reporting through | ||
694 | * the get-tiling-ioctl by reporting the physical swizzle | ||
695 | * mode as unknown instead. | ||
696 | * | ||
697 | * As we don't strictly know what the swizzling is, it may be | ||
698 | * bit17 dependent, and so we need to also prevent the pages | ||
699 | * from being moved. | ||
700 | */ | ||
701 | dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES; | ||
702 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; | ||
703 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; | ||
704 | } | ||
705 | |||
688 | dev_priv->mm.bit_6_swizzle_x = swizzle_x; | 706 | dev_priv->mm.bit_6_swizzle_x = swizzle_x; |
689 | dev_priv->mm.bit_6_swizzle_y = swizzle_y; | 707 | dev_priv->mm.bit_6_swizzle_y = swizzle_y; |
690 | } | 708 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 43f35d12b677..86c7500454b4 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -2676,6 +2676,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, | |||
2676 | return ret; | 2676 | return ret; |
2677 | } | 2677 | } |
2678 | vma->bound |= GLOBAL_BIND; | 2678 | vma->bound |= GLOBAL_BIND; |
2679 | __i915_vma_set_map_and_fenceable(vma); | ||
2679 | list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list); | 2680 | list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list); |
2680 | } | 2681 | } |
2681 | 2682 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index cdacf3f5b77a..87e919a06b27 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c | |||
@@ -687,6 +687,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, | |||
687 | } | 687 | } |
688 | 688 | ||
689 | vma->bound |= GLOBAL_BIND; | 689 | vma->bound |= GLOBAL_BIND; |
690 | __i915_vma_set_map_and_fenceable(vma); | ||
690 | list_add_tail(&vma->mm_list, &ggtt->inactive_list); | 691 | list_add_tail(&vma->mm_list, &ggtt->inactive_list); |
691 | } | 692 | } |
692 | 693 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 71860f8680f9..beb0374a19f1 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -116,6 +116,7 @@ static void skylake_pfit_enable(struct intel_crtc *crtc); | |||
116 | static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); | 116 | static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); |
117 | static void ironlake_pfit_enable(struct intel_crtc *crtc); | 117 | static void ironlake_pfit_enable(struct intel_crtc *crtc); |
118 | static void intel_modeset_setup_hw_state(struct drm_device *dev); | 118 | static void intel_modeset_setup_hw_state(struct drm_device *dev); |
119 | static void intel_pre_disable_primary(struct drm_crtc *crtc); | ||
119 | 120 | ||
120 | typedef struct { | 121 | typedef struct { |
121 | int min, max; | 122 | int min, max; |
@@ -2607,6 +2608,8 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, | |||
2607 | struct drm_i915_gem_object *obj; | 2608 | struct drm_i915_gem_object *obj; |
2608 | struct drm_plane *primary = intel_crtc->base.primary; | 2609 | struct drm_plane *primary = intel_crtc->base.primary; |
2609 | struct drm_plane_state *plane_state = primary->state; | 2610 | struct drm_plane_state *plane_state = primary->state; |
2611 | struct drm_crtc_state *crtc_state = intel_crtc->base.state; | ||
2612 | struct intel_plane *intel_plane = to_intel_plane(primary); | ||
2610 | struct drm_framebuffer *fb; | 2613 | struct drm_framebuffer *fb; |
2611 | 2614 | ||
2612 | if (!plane_config->fb) | 2615 | if (!plane_config->fb) |
@@ -2643,6 +2646,18 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, | |||
2643 | } | 2646 | } |
2644 | } | 2647 | } |
2645 | 2648 | ||
2649 | /* | ||
2650 | * We've failed to reconstruct the BIOS FB. Current display state | ||
2651 | * indicates that the primary plane is visible, but has a NULL FB, | ||
2652 | * which will lead to problems later if we don't fix it up. The | ||
2653 | * simplest solution is to just disable the primary plane now and | ||
2654 | * pretend the BIOS never had it enabled. | ||
2655 | */ | ||
2656 | to_intel_plane_state(plane_state)->visible = false; | ||
2657 | crtc_state->plane_mask &= ~(1 << drm_plane_index(primary)); | ||
2658 | intel_pre_disable_primary(&intel_crtc->base); | ||
2659 | intel_plane->disable_plane(primary, &intel_crtc->base); | ||
2660 | |||
2646 | return; | 2661 | return; |
2647 | 2662 | ||
2648 | valid_fb: | 2663 | valid_fb: |
@@ -5194,11 +5209,31 @@ static enum intel_display_power_domain port_to_power_domain(enum port port) | |||
5194 | case PORT_E: | 5209 | case PORT_E: |
5195 | return POWER_DOMAIN_PORT_DDI_E_2_LANES; | 5210 | return POWER_DOMAIN_PORT_DDI_E_2_LANES; |
5196 | default: | 5211 | default: |
5197 | WARN_ON_ONCE(1); | 5212 | MISSING_CASE(port); |
5198 | return POWER_DOMAIN_PORT_OTHER; | 5213 | return POWER_DOMAIN_PORT_OTHER; |
5199 | } | 5214 | } |
5200 | } | 5215 | } |
5201 | 5216 | ||
5217 | static enum intel_display_power_domain port_to_aux_power_domain(enum port port) | ||
5218 | { | ||
5219 | switch (port) { | ||
5220 | case PORT_A: | ||
5221 | return POWER_DOMAIN_AUX_A; | ||
5222 | case PORT_B: | ||
5223 | return POWER_DOMAIN_AUX_B; | ||
5224 | case PORT_C: | ||
5225 | return POWER_DOMAIN_AUX_C; | ||
5226 | case PORT_D: | ||
5227 | return POWER_DOMAIN_AUX_D; | ||
5228 | case PORT_E: | ||
5229 | /* FIXME: Check VBT for actual wiring of PORT E */ | ||
5230 | return POWER_DOMAIN_AUX_D; | ||
5231 | default: | ||
5232 | MISSING_CASE(port); | ||
5233 | return POWER_DOMAIN_AUX_A; | ||
5234 | } | ||
5235 | } | ||
5236 | |||
5202 | #define for_each_power_domain(domain, mask) \ | 5237 | #define for_each_power_domain(domain, mask) \ |
5203 | for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ | 5238 | for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ |
5204 | if ((1 << (domain)) & (mask)) | 5239 | if ((1 << (domain)) & (mask)) |
@@ -5230,6 +5265,36 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder) | |||
5230 | } | 5265 | } |
5231 | } | 5266 | } |
5232 | 5267 | ||
5268 | enum intel_display_power_domain | ||
5269 | intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder) | ||
5270 | { | ||
5271 | struct drm_device *dev = intel_encoder->base.dev; | ||
5272 | struct intel_digital_port *intel_dig_port; | ||
5273 | |||
5274 | switch (intel_encoder->type) { | ||
5275 | case INTEL_OUTPUT_UNKNOWN: | ||
5276 | case INTEL_OUTPUT_HDMI: | ||
5277 | /* | ||
5278 | * Only DDI platforms should ever use these output types. | ||
5279 | * We can get here after the HDMI detect code has already set | ||
5280 | * the type of the shared encoder. Since we can't be sure | ||
5281 | * what's the status of the given connectors, play safe and | ||
5282 | * run the DP detection too. | ||
5283 | */ | ||
5284 | WARN_ON_ONCE(!HAS_DDI(dev)); | ||
5285 | case INTEL_OUTPUT_DISPLAYPORT: | ||
5286 | case INTEL_OUTPUT_EDP: | ||
5287 | intel_dig_port = enc_to_dig_port(&intel_encoder->base); | ||
5288 | return port_to_aux_power_domain(intel_dig_port->port); | ||
5289 | case INTEL_OUTPUT_DP_MST: | ||
5290 | intel_dig_port = enc_to_mst(&intel_encoder->base)->primary; | ||
5291 | return port_to_aux_power_domain(intel_dig_port->port); | ||
5292 | default: | ||
5293 | MISSING_CASE(intel_encoder->type); | ||
5294 | return POWER_DOMAIN_AUX_A; | ||
5295 | } | ||
5296 | } | ||
5297 | |||
5233 | static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) | 5298 | static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) |
5234 | { | 5299 | { |
5235 | struct drm_device *dev = crtc->dev; | 5300 | struct drm_device *dev = crtc->dev; |
@@ -6259,9 +6324,11 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) | |||
6259 | if (to_intel_plane_state(crtc->primary->state)->visible) { | 6324 | if (to_intel_plane_state(crtc->primary->state)->visible) { |
6260 | intel_crtc_wait_for_pending_flips(crtc); | 6325 | intel_crtc_wait_for_pending_flips(crtc); |
6261 | intel_pre_disable_primary(crtc); | 6326 | intel_pre_disable_primary(crtc); |
6327 | |||
6328 | intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary)); | ||
6329 | to_intel_plane_state(crtc->primary->state)->visible = false; | ||
6262 | } | 6330 | } |
6263 | 6331 | ||
6264 | intel_crtc_disable_planes(crtc, crtc->state->plane_mask); | ||
6265 | dev_priv->display.crtc_disable(crtc); | 6332 | dev_priv->display.crtc_disable(crtc); |
6266 | intel_crtc->active = false; | 6333 | intel_crtc->active = false; |
6267 | intel_update_watermarks(crtc); | 6334 | intel_update_watermarks(crtc); |
@@ -9858,14 +9925,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, | |||
9858 | return true; | 9925 | return true; |
9859 | } | 9926 | } |
9860 | 9927 | ||
9861 | static void i845_update_cursor(struct drm_crtc *crtc, u32 base) | 9928 | static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on) |
9862 | { | 9929 | { |
9863 | struct drm_device *dev = crtc->dev; | 9930 | struct drm_device *dev = crtc->dev; |
9864 | struct drm_i915_private *dev_priv = dev->dev_private; | 9931 | struct drm_i915_private *dev_priv = dev->dev_private; |
9865 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 9932 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
9866 | uint32_t cntl = 0, size = 0; | 9933 | uint32_t cntl = 0, size = 0; |
9867 | 9934 | ||
9868 | if (base) { | 9935 | if (on) { |
9869 | unsigned int width = intel_crtc->base.cursor->state->crtc_w; | 9936 | unsigned int width = intel_crtc->base.cursor->state->crtc_w; |
9870 | unsigned int height = intel_crtc->base.cursor->state->crtc_h; | 9937 | unsigned int height = intel_crtc->base.cursor->state->crtc_h; |
9871 | unsigned int stride = roundup_pow_of_two(width) * 4; | 9938 | unsigned int stride = roundup_pow_of_two(width) * 4; |
@@ -9920,16 +9987,15 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base) | |||
9920 | } | 9987 | } |
9921 | } | 9988 | } |
9922 | 9989 | ||
9923 | static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) | 9990 | static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on) |
9924 | { | 9991 | { |
9925 | struct drm_device *dev = crtc->dev; | 9992 | struct drm_device *dev = crtc->dev; |
9926 | struct drm_i915_private *dev_priv = dev->dev_private; | 9993 | struct drm_i915_private *dev_priv = dev->dev_private; |
9927 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 9994 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
9928 | int pipe = intel_crtc->pipe; | 9995 | int pipe = intel_crtc->pipe; |
9929 | uint32_t cntl; | 9996 | uint32_t cntl = 0; |
9930 | 9997 | ||
9931 | cntl = 0; | 9998 | if (on) { |
9932 | if (base) { | ||
9933 | cntl = MCURSOR_GAMMA_ENABLE; | 9999 | cntl = MCURSOR_GAMMA_ENABLE; |
9934 | switch (intel_crtc->base.cursor->state->crtc_w) { | 10000 | switch (intel_crtc->base.cursor->state->crtc_w) { |
9935 | case 64: | 10001 | case 64: |
@@ -9980,18 +10046,17 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
9980 | int y = cursor_state->crtc_y; | 10046 | int y = cursor_state->crtc_y; |
9981 | u32 base = 0, pos = 0; | 10047 | u32 base = 0, pos = 0; |
9982 | 10048 | ||
9983 | if (on) | 10049 | base = intel_crtc->cursor_addr; |
9984 | base = intel_crtc->cursor_addr; | ||
9985 | 10050 | ||
9986 | if (x >= intel_crtc->config->pipe_src_w) | 10051 | if (x >= intel_crtc->config->pipe_src_w) |
9987 | base = 0; | 10052 | on = false; |
9988 | 10053 | ||
9989 | if (y >= intel_crtc->config->pipe_src_h) | 10054 | if (y >= intel_crtc->config->pipe_src_h) |
9990 | base = 0; | 10055 | on = false; |
9991 | 10056 | ||
9992 | if (x < 0) { | 10057 | if (x < 0) { |
9993 | if (x + cursor_state->crtc_w <= 0) | 10058 | if (x + cursor_state->crtc_w <= 0) |
9994 | base = 0; | 10059 | on = false; |
9995 | 10060 | ||
9996 | pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; | 10061 | pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; |
9997 | x = -x; | 10062 | x = -x; |
@@ -10000,16 +10065,13 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
10000 | 10065 | ||
10001 | if (y < 0) { | 10066 | if (y < 0) { |
10002 | if (y + cursor_state->crtc_h <= 0) | 10067 | if (y + cursor_state->crtc_h <= 0) |
10003 | base = 0; | 10068 | on = false; |
10004 | 10069 | ||
10005 | pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; | 10070 | pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; |
10006 | y = -y; | 10071 | y = -y; |
10007 | } | 10072 | } |
10008 | pos |= y << CURSOR_Y_SHIFT; | 10073 | pos |= y << CURSOR_Y_SHIFT; |
10009 | 10074 | ||
10010 | if (base == 0 && intel_crtc->cursor_base == 0) | ||
10011 | return; | ||
10012 | |||
10013 | I915_WRITE(CURPOS(pipe), pos); | 10075 | I915_WRITE(CURPOS(pipe), pos); |
10014 | 10076 | ||
10015 | /* ILK+ do this automagically */ | 10077 | /* ILK+ do this automagically */ |
@@ -10020,9 +10082,9 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
10020 | } | 10082 | } |
10021 | 10083 | ||
10022 | if (IS_845G(dev) || IS_I865G(dev)) | 10084 | if (IS_845G(dev) || IS_I865G(dev)) |
10023 | i845_update_cursor(crtc, base); | 10085 | i845_update_cursor(crtc, base, on); |
10024 | else | 10086 | else |
10025 | i9xx_update_cursor(crtc, base); | 10087 | i9xx_update_cursor(crtc, base, on); |
10026 | } | 10088 | } |
10027 | 10089 | ||
10028 | static bool cursor_size_ok(struct drm_device *dev, | 10090 | static bool cursor_size_ok(struct drm_device *dev, |
@@ -12460,7 +12522,6 @@ intel_pipe_config_compare(struct drm_device *dev, | |||
12460 | if (INTEL_INFO(dev)->gen < 8) { | 12522 | if (INTEL_INFO(dev)->gen < 8) { |
12461 | PIPE_CONF_CHECK_M_N(dp_m_n); | 12523 | PIPE_CONF_CHECK_M_N(dp_m_n); |
12462 | 12524 | ||
12463 | PIPE_CONF_CHECK_I(has_drrs); | ||
12464 | if (current_config->has_drrs) | 12525 | if (current_config->has_drrs) |
12465 | PIPE_CONF_CHECK_M_N(dp_m2_n2); | 12526 | PIPE_CONF_CHECK_M_N(dp_m2_n2); |
12466 | } else | 12527 | } else |
@@ -13667,6 +13728,7 @@ intel_check_cursor_plane(struct drm_plane *plane, | |||
13667 | struct drm_crtc *crtc = crtc_state->base.crtc; | 13728 | struct drm_crtc *crtc = crtc_state->base.crtc; |
13668 | struct drm_framebuffer *fb = state->base.fb; | 13729 | struct drm_framebuffer *fb = state->base.fb; |
13669 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); | 13730 | struct drm_i915_gem_object *obj = intel_fb_obj(fb); |
13731 | enum pipe pipe = to_intel_plane(plane)->pipe; | ||
13670 | unsigned stride; | 13732 | unsigned stride; |
13671 | int ret; | 13733 | int ret; |
13672 | 13734 | ||
@@ -13700,6 +13762,22 @@ intel_check_cursor_plane(struct drm_plane *plane, | |||
13700 | return -EINVAL; | 13762 | return -EINVAL; |
13701 | } | 13763 | } |
13702 | 13764 | ||
13765 | /* | ||
13766 | * There's something wrong with the cursor on CHV pipe C. | ||
13767 | * If it straddles the left edge of the screen then | ||
13768 | * moving it away from the edge or disabling it often | ||
13769 | * results in a pipe underrun, and often that can lead to | ||
13770 | * dead pipe (constant underrun reported, and it scans | ||
13771 | * out just a solid color). To recover from that, the | ||
13772 | * display power well must be turned off and on again. | ||
13773 | * Refuse the put the cursor into that compromised position. | ||
13774 | */ | ||
13775 | if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C && | ||
13776 | state->visible && state->base.crtc_x < 0) { | ||
13777 | DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); | ||
13778 | return -EINVAL; | ||
13779 | } | ||
13780 | |||
13703 | return 0; | 13781 | return 0; |
13704 | } | 13782 | } |
13705 | 13783 | ||
@@ -13723,9 +13801,6 @@ intel_commit_cursor_plane(struct drm_plane *plane, | |||
13723 | crtc = crtc ? crtc : plane->crtc; | 13801 | crtc = crtc ? crtc : plane->crtc; |
13724 | intel_crtc = to_intel_crtc(crtc); | 13802 | intel_crtc = to_intel_crtc(crtc); |
13725 | 13803 | ||
13726 | if (intel_crtc->cursor_bo == obj) | ||
13727 | goto update; | ||
13728 | |||
13729 | if (!obj) | 13804 | if (!obj) |
13730 | addr = 0; | 13805 | addr = 0; |
13731 | else if (!INTEL_INFO(dev)->cursor_needs_physical) | 13806 | else if (!INTEL_INFO(dev)->cursor_needs_physical) |
@@ -13734,9 +13809,7 @@ intel_commit_cursor_plane(struct drm_plane *plane, | |||
13734 | addr = obj->phys_handle->busaddr; | 13809 | addr = obj->phys_handle->busaddr; |
13735 | 13810 | ||
13736 | intel_crtc->cursor_addr = addr; | 13811 | intel_crtc->cursor_addr = addr; |
13737 | intel_crtc->cursor_bo = obj; | ||
13738 | 13812 | ||
13739 | update: | ||
13740 | if (crtc->state->active) | 13813 | if (crtc->state->active) |
13741 | intel_crtc_update_cursor(crtc, state->visible); | 13814 | intel_crtc_update_cursor(crtc, state->visible); |
13742 | } | 13815 | } |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 09bdd94ca3ba..78b8ec84d576 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -277,7 +277,7 @@ static void pps_lock(struct intel_dp *intel_dp) | |||
277 | * See vlv_power_sequencer_reset() why we need | 277 | * See vlv_power_sequencer_reset() why we need |
278 | * a power domain reference here. | 278 | * a power domain reference here. |
279 | */ | 279 | */ |
280 | power_domain = intel_display_port_power_domain(encoder); | 280 | power_domain = intel_display_port_aux_power_domain(encoder); |
281 | intel_display_power_get(dev_priv, power_domain); | 281 | intel_display_power_get(dev_priv, power_domain); |
282 | 282 | ||
283 | mutex_lock(&dev_priv->pps_mutex); | 283 | mutex_lock(&dev_priv->pps_mutex); |
@@ -293,7 +293,7 @@ static void pps_unlock(struct intel_dp *intel_dp) | |||
293 | 293 | ||
294 | mutex_unlock(&dev_priv->pps_mutex); | 294 | mutex_unlock(&dev_priv->pps_mutex); |
295 | 295 | ||
296 | power_domain = intel_display_port_power_domain(encoder); | 296 | power_domain = intel_display_port_aux_power_domain(encoder); |
297 | intel_display_power_put(dev_priv, power_domain); | 297 | intel_display_power_put(dev_priv, power_domain); |
298 | } | 298 | } |
299 | 299 | ||
@@ -816,8 +816,6 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
816 | 816 | ||
817 | intel_dp_check_edp(intel_dp); | 817 | intel_dp_check_edp(intel_dp); |
818 | 818 | ||
819 | intel_aux_display_runtime_get(dev_priv); | ||
820 | |||
821 | /* Try to wait for any previous AUX channel activity */ | 819 | /* Try to wait for any previous AUX channel activity */ |
822 | for (try = 0; try < 3; try++) { | 820 | for (try = 0; try < 3; try++) { |
823 | status = I915_READ_NOTRACE(ch_ctl); | 821 | status = I915_READ_NOTRACE(ch_ctl); |
@@ -926,7 +924,6 @@ done: | |||
926 | ret = recv_bytes; | 924 | ret = recv_bytes; |
927 | out: | 925 | out: |
928 | pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); | 926 | pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); |
929 | intel_aux_display_runtime_put(dev_priv); | ||
930 | 927 | ||
931 | if (vdd) | 928 | if (vdd) |
932 | edp_panel_vdd_off(intel_dp, false); | 929 | edp_panel_vdd_off(intel_dp, false); |
@@ -1784,7 +1781,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) | |||
1784 | if (edp_have_panel_vdd(intel_dp)) | 1781 | if (edp_have_panel_vdd(intel_dp)) |
1785 | return need_to_disable; | 1782 | return need_to_disable; |
1786 | 1783 | ||
1787 | power_domain = intel_display_port_power_domain(intel_encoder); | 1784 | power_domain = intel_display_port_aux_power_domain(intel_encoder); |
1788 | intel_display_power_get(dev_priv, power_domain); | 1785 | intel_display_power_get(dev_priv, power_domain); |
1789 | 1786 | ||
1790 | DRM_DEBUG_KMS("Turning eDP port %c VDD on\n", | 1787 | DRM_DEBUG_KMS("Turning eDP port %c VDD on\n", |
@@ -1874,7 +1871,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) | |||
1874 | if ((pp & POWER_TARGET_ON) == 0) | 1871 | if ((pp & POWER_TARGET_ON) == 0) |
1875 | intel_dp->last_power_cycle = jiffies; | 1872 | intel_dp->last_power_cycle = jiffies; |
1876 | 1873 | ||
1877 | power_domain = intel_display_port_power_domain(intel_encoder); | 1874 | power_domain = intel_display_port_aux_power_domain(intel_encoder); |
1878 | intel_display_power_put(dev_priv, power_domain); | 1875 | intel_display_power_put(dev_priv, power_domain); |
1879 | } | 1876 | } |
1880 | 1877 | ||
@@ -2025,7 +2022,7 @@ static void edp_panel_off(struct intel_dp *intel_dp) | |||
2025 | wait_panel_off(intel_dp); | 2022 | wait_panel_off(intel_dp); |
2026 | 2023 | ||
2027 | /* We got a reference when we enabled the VDD. */ | 2024 | /* We got a reference when we enabled the VDD. */ |
2028 | power_domain = intel_display_port_power_domain(intel_encoder); | 2025 | power_domain = intel_display_port_aux_power_domain(intel_encoder); |
2029 | intel_display_power_put(dev_priv, power_domain); | 2026 | intel_display_power_put(dev_priv, power_domain); |
2030 | } | 2027 | } |
2031 | 2028 | ||
@@ -4765,26 +4762,6 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) | |||
4765 | intel_dp->has_audio = false; | 4762 | intel_dp->has_audio = false; |
4766 | } | 4763 | } |
4767 | 4764 | ||
4768 | static enum intel_display_power_domain | ||
4769 | intel_dp_power_get(struct intel_dp *dp) | ||
4770 | { | ||
4771 | struct intel_encoder *encoder = &dp_to_dig_port(dp)->base; | ||
4772 | enum intel_display_power_domain power_domain; | ||
4773 | |||
4774 | power_domain = intel_display_port_power_domain(encoder); | ||
4775 | intel_display_power_get(to_i915(encoder->base.dev), power_domain); | ||
4776 | |||
4777 | return power_domain; | ||
4778 | } | ||
4779 | |||
4780 | static void | ||
4781 | intel_dp_power_put(struct intel_dp *dp, | ||
4782 | enum intel_display_power_domain power_domain) | ||
4783 | { | ||
4784 | struct intel_encoder *encoder = &dp_to_dig_port(dp)->base; | ||
4785 | intel_display_power_put(to_i915(encoder->base.dev), power_domain); | ||
4786 | } | ||
4787 | |||
4788 | static enum drm_connector_status | 4765 | static enum drm_connector_status |
4789 | intel_dp_detect(struct drm_connector *connector, bool force) | 4766 | intel_dp_detect(struct drm_connector *connector, bool force) |
4790 | { | 4767 | { |
@@ -4808,7 +4785,8 @@ intel_dp_detect(struct drm_connector *connector, bool force) | |||
4808 | return connector_status_disconnected; | 4785 | return connector_status_disconnected; |
4809 | } | 4786 | } |
4810 | 4787 | ||
4811 | power_domain = intel_dp_power_get(intel_dp); | 4788 | power_domain = intel_display_port_aux_power_domain(intel_encoder); |
4789 | intel_display_power_get(to_i915(dev), power_domain); | ||
4812 | 4790 | ||
4813 | /* Can't disconnect eDP, but you can close the lid... */ | 4791 | /* Can't disconnect eDP, but you can close the lid... */ |
4814 | if (is_edp(intel_dp)) | 4792 | if (is_edp(intel_dp)) |
@@ -4853,7 +4831,7 @@ intel_dp_detect(struct drm_connector *connector, bool force) | |||
4853 | } | 4831 | } |
4854 | 4832 | ||
4855 | out: | 4833 | out: |
4856 | intel_dp_power_put(intel_dp, power_domain); | 4834 | intel_display_power_put(to_i915(dev), power_domain); |
4857 | return status; | 4835 | return status; |
4858 | } | 4836 | } |
4859 | 4837 | ||
@@ -4862,6 +4840,7 @@ intel_dp_force(struct drm_connector *connector) | |||
4862 | { | 4840 | { |
4863 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 4841 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
4864 | struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; | 4842 | struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; |
4843 | struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); | ||
4865 | enum intel_display_power_domain power_domain; | 4844 | enum intel_display_power_domain power_domain; |
4866 | 4845 | ||
4867 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", | 4846 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", |
@@ -4871,11 +4850,12 @@ intel_dp_force(struct drm_connector *connector) | |||
4871 | if (connector->status != connector_status_connected) | 4850 | if (connector->status != connector_status_connected) |
4872 | return; | 4851 | return; |
4873 | 4852 | ||
4874 | power_domain = intel_dp_power_get(intel_dp); | 4853 | power_domain = intel_display_port_aux_power_domain(intel_encoder); |
4854 | intel_display_power_get(dev_priv, power_domain); | ||
4875 | 4855 | ||
4876 | intel_dp_set_edid(intel_dp); | 4856 | intel_dp_set_edid(intel_dp); |
4877 | 4857 | ||
4878 | intel_dp_power_put(intel_dp, power_domain); | 4858 | intel_display_power_put(dev_priv, power_domain); |
4879 | 4859 | ||
4880 | if (intel_encoder->type != INTEL_OUTPUT_EDP) | 4860 | if (intel_encoder->type != INTEL_OUTPUT_EDP) |
4881 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; | 4861 | intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; |
@@ -5091,7 +5071,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) | |||
5091 | * indefinitely. | 5071 | * indefinitely. |
5092 | */ | 5072 | */ |
5093 | DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n"); | 5073 | DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n"); |
5094 | power_domain = intel_display_port_power_domain(&intel_dig_port->base); | 5074 | power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base); |
5095 | intel_display_power_get(dev_priv, power_domain); | 5075 | intel_display_power_get(dev_priv, power_domain); |
5096 | 5076 | ||
5097 | edp_panel_vdd_schedule_off(intel_dp); | 5077 | edp_panel_vdd_schedule_off(intel_dp); |
@@ -5153,7 +5133,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) | |||
5153 | enum intel_display_power_domain power_domain; | 5133 | enum intel_display_power_domain power_domain; |
5154 | enum irqreturn ret = IRQ_NONE; | 5134 | enum irqreturn ret = IRQ_NONE; |
5155 | 5135 | ||
5156 | if (intel_dig_port->base.type != INTEL_OUTPUT_EDP) | 5136 | if (intel_dig_port->base.type != INTEL_OUTPUT_EDP && |
5137 | intel_dig_port->base.type != INTEL_OUTPUT_HDMI) | ||
5157 | intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; | 5138 | intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; |
5158 | 5139 | ||
5159 | if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { | 5140 | if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { |
@@ -5172,7 +5153,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) | |||
5172 | port_name(intel_dig_port->port), | 5153 | port_name(intel_dig_port->port), |
5173 | long_hpd ? "long" : "short"); | 5154 | long_hpd ? "long" : "short"); |
5174 | 5155 | ||
5175 | power_domain = intel_display_port_power_domain(intel_encoder); | 5156 | power_domain = intel_display_port_aux_power_domain(intel_encoder); |
5176 | intel_display_power_get(dev_priv, power_domain); | 5157 | intel_display_power_get(dev_priv, power_domain); |
5177 | 5158 | ||
5178 | if (long_hpd) { | 5159 | if (long_hpd) { |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0598932ce623..0d00f07b7163 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -550,7 +550,6 @@ struct intel_crtc { | |||
550 | int adjusted_x; | 550 | int adjusted_x; |
551 | int adjusted_y; | 551 | int adjusted_y; |
552 | 552 | ||
553 | struct drm_i915_gem_object *cursor_bo; | ||
554 | uint32_t cursor_addr; | 553 | uint32_t cursor_addr; |
555 | uint32_t cursor_cntl; | 554 | uint32_t cursor_cntl; |
556 | uint32_t cursor_size; | 555 | uint32_t cursor_size; |
@@ -1169,6 +1168,8 @@ void hsw_enable_ips(struct intel_crtc *crtc); | |||
1169 | void hsw_disable_ips(struct intel_crtc *crtc); | 1168 | void hsw_disable_ips(struct intel_crtc *crtc); |
1170 | enum intel_display_power_domain | 1169 | enum intel_display_power_domain |
1171 | intel_display_port_power_domain(struct intel_encoder *intel_encoder); | 1170 | intel_display_port_power_domain(struct intel_encoder *intel_encoder); |
1171 | enum intel_display_power_domain | ||
1172 | intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder); | ||
1172 | void intel_mode_from_pipe_config(struct drm_display_mode *mode, | 1173 | void intel_mode_from_pipe_config(struct drm_display_mode *mode, |
1173 | struct intel_crtc_state *pipe_config); | 1174 | struct intel_crtc_state *pipe_config); |
1174 | void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc); | 1175 | void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc); |
@@ -1377,8 +1378,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv, | |||
1377 | enum intel_display_power_domain domain); | 1378 | enum intel_display_power_domain domain); |
1378 | void intel_display_power_put(struct drm_i915_private *dev_priv, | 1379 | void intel_display_power_put(struct drm_i915_private *dev_priv, |
1379 | enum intel_display_power_domain domain); | 1380 | enum intel_display_power_domain domain); |
1380 | void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); | ||
1381 | void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); | ||
1382 | void intel_runtime_pm_get(struct drm_i915_private *dev_priv); | 1381 | void intel_runtime_pm_get(struct drm_i915_private *dev_priv); |
1383 | void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); | 1382 | void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); |
1384 | void intel_runtime_pm_put(struct drm_i915_private *dev_priv); | 1383 | void intel_runtime_pm_put(struct drm_i915_private *dev_priv); |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 9eafa191cee2..64086f2d4e26 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -1335,21 +1335,17 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force) | |||
1335 | { | 1335 | { |
1336 | struct drm_i915_private *dev_priv = to_i915(connector->dev); | 1336 | struct drm_i915_private *dev_priv = to_i915(connector->dev); |
1337 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); | 1337 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); |
1338 | struct intel_encoder *intel_encoder = | ||
1339 | &hdmi_to_dig_port(intel_hdmi)->base; | ||
1340 | enum intel_display_power_domain power_domain; | ||
1341 | struct edid *edid = NULL; | 1338 | struct edid *edid = NULL; |
1342 | bool connected = false; | 1339 | bool connected = false; |
1343 | 1340 | ||
1344 | power_domain = intel_display_port_power_domain(intel_encoder); | 1341 | intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); |
1345 | intel_display_power_get(dev_priv, power_domain); | ||
1346 | 1342 | ||
1347 | if (force) | 1343 | if (force) |
1348 | edid = drm_get_edid(connector, | 1344 | edid = drm_get_edid(connector, |
1349 | intel_gmbus_get_adapter(dev_priv, | 1345 | intel_gmbus_get_adapter(dev_priv, |
1350 | intel_hdmi->ddc_bus)); | 1346 | intel_hdmi->ddc_bus)); |
1351 | 1347 | ||
1352 | intel_display_power_put(dev_priv, power_domain); | 1348 | intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); |
1353 | 1349 | ||
1354 | to_intel_connector(connector)->detect_edid = edid; | 1350 | to_intel_connector(connector)->detect_edid = edid; |
1355 | if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { | 1351 | if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { |
@@ -1378,15 +1374,18 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) | |||
1378 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); | 1374 | struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); |
1379 | struct drm_i915_private *dev_priv = to_i915(connector->dev); | 1375 | struct drm_i915_private *dev_priv = to_i915(connector->dev); |
1380 | bool live_status = false; | 1376 | bool live_status = false; |
1381 | unsigned int retry = 3; | 1377 | unsigned int try; |
1382 | 1378 | ||
1383 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", | 1379 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", |
1384 | connector->base.id, connector->name); | 1380 | connector->base.id, connector->name); |
1385 | 1381 | ||
1386 | while (!live_status && --retry) { | 1382 | intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); |
1383 | |||
1384 | for (try = 0; !live_status && try < 4; try++) { | ||
1385 | if (try) | ||
1386 | msleep(10); | ||
1387 | live_status = intel_digital_port_connected(dev_priv, | 1387 | live_status = intel_digital_port_connected(dev_priv, |
1388 | hdmi_to_dig_port(intel_hdmi)); | 1388 | hdmi_to_dig_port(intel_hdmi)); |
1389 | mdelay(10); | ||
1390 | } | 1389 | } |
1391 | 1390 | ||
1392 | if (!live_status) | 1391 | if (!live_status) |
@@ -1402,6 +1401,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) | |||
1402 | } else | 1401 | } else |
1403 | status = connector_status_disconnected; | 1402 | status = connector_status_disconnected; |
1404 | 1403 | ||
1404 | intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); | ||
1405 | |||
1405 | return status; | 1406 | return status; |
1406 | } | 1407 | } |
1407 | 1408 | ||
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 1369fc41d039..8324654037b6 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -483,7 +483,7 @@ gmbus_xfer(struct i2c_adapter *adapter, | |||
483 | int i = 0, inc, try = 0; | 483 | int i = 0, inc, try = 0; |
484 | int ret = 0; | 484 | int ret = 0; |
485 | 485 | ||
486 | intel_aux_display_runtime_get(dev_priv); | 486 | intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); |
487 | mutex_lock(&dev_priv->gmbus_mutex); | 487 | mutex_lock(&dev_priv->gmbus_mutex); |
488 | 488 | ||
489 | if (bus->force_bit) { | 489 | if (bus->force_bit) { |
@@ -595,7 +595,9 @@ timeout: | |||
595 | 595 | ||
596 | out: | 596 | out: |
597 | mutex_unlock(&dev_priv->gmbus_mutex); | 597 | mutex_unlock(&dev_priv->gmbus_mutex); |
598 | intel_aux_display_runtime_put(dev_priv); | 598 | |
599 | intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); | ||
600 | |||
599 | return ret; | 601 | return ret; |
600 | } | 602 | } |
601 | 603 | ||
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 071a76b9ac52..f091ad12d694 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -4782,8 +4782,7 @@ static void gen9_enable_rc6(struct drm_device *dev) | |||
4782 | /* 2b: Program RC6 thresholds.*/ | 4782 | /* 2b: Program RC6 thresholds.*/ |
4783 | 4783 | ||
4784 | /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ | 4784 | /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ |
4785 | if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && | 4785 | if (IS_SKYLAKE(dev)) |
4786 | (INTEL_REVID(dev) <= SKL_REVID_E0))) | ||
4787 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); | 4786 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); |
4788 | else | 4787 | else |
4789 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); | 4788 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); |
@@ -4825,7 +4824,7 @@ static void gen9_enable_rc6(struct drm_device *dev) | |||
4825 | * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. | 4824 | * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. |
4826 | */ | 4825 | */ |
4827 | if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) || | 4826 | if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) || |
4828 | ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0))) | 4827 | ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0))) |
4829 | I915_WRITE(GEN9_PG_ENABLE, 0); | 4828 | I915_WRITE(GEN9_PG_ENABLE, 0); |
4830 | else | 4829 | else |
4831 | I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? | 4830 | I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? |
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index d89c1d0aa1b7..7e23d65c9b24 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
@@ -362,6 +362,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv, | |||
362 | BIT(POWER_DOMAIN_AUX_C) | \ | 362 | BIT(POWER_DOMAIN_AUX_C) | \ |
363 | BIT(POWER_DOMAIN_AUDIO) | \ | 363 | BIT(POWER_DOMAIN_AUDIO) | \ |
364 | BIT(POWER_DOMAIN_VGA) | \ | 364 | BIT(POWER_DOMAIN_VGA) | \ |
365 | BIT(POWER_DOMAIN_GMBUS) | \ | ||
365 | BIT(POWER_DOMAIN_INIT)) | 366 | BIT(POWER_DOMAIN_INIT)) |
366 | #define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \ | 367 | #define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \ |
367 | BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ | 368 | BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ |
@@ -1483,6 +1484,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, | |||
1483 | BIT(POWER_DOMAIN_AUX_B) | \ | 1484 | BIT(POWER_DOMAIN_AUX_B) | \ |
1484 | BIT(POWER_DOMAIN_AUX_C) | \ | 1485 | BIT(POWER_DOMAIN_AUX_C) | \ |
1485 | BIT(POWER_DOMAIN_AUX_D) | \ | 1486 | BIT(POWER_DOMAIN_AUX_D) | \ |
1487 | BIT(POWER_DOMAIN_GMBUS) | \ | ||
1486 | BIT(POWER_DOMAIN_INIT)) | 1488 | BIT(POWER_DOMAIN_INIT)) |
1487 | #define HSW_DISPLAY_POWER_DOMAINS ( \ | 1489 | #define HSW_DISPLAY_POWER_DOMAINS ( \ |
1488 | (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \ | 1490 | (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \ |
@@ -1845,6 +1847,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) | |||
1845 | i915.disable_power_well = sanitize_disable_power_well_option(dev_priv, | 1847 | i915.disable_power_well = sanitize_disable_power_well_option(dev_priv, |
1846 | i915.disable_power_well); | 1848 | i915.disable_power_well); |
1847 | 1849 | ||
1850 | BUILD_BUG_ON(POWER_DOMAIN_NUM > 31); | ||
1851 | |||
1848 | mutex_init(&power_domains->lock); | 1852 | mutex_init(&power_domains->lock); |
1849 | 1853 | ||
1850 | /* | 1854 | /* |
@@ -2064,36 +2068,6 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv) | |||
2064 | } | 2068 | } |
2065 | 2069 | ||
2066 | /** | 2070 | /** |
2067 | * intel_aux_display_runtime_get - grab an auxiliary power domain reference | ||
2068 | * @dev_priv: i915 device instance | ||
2069 | * | ||
2070 | * This function grabs a power domain reference for the auxiliary power domain | ||
2071 | * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its | ||
2072 | * parents are powered up. Therefore users should only grab a reference to the | ||
2073 | * innermost power domain they need. | ||
2074 | * | ||
2075 | * Any power domain reference obtained by this function must have a symmetric | ||
2076 | * call to intel_aux_display_runtime_put() to release the reference again. | ||
2077 | */ | ||
2078 | void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv) | ||
2079 | { | ||
2080 | intel_runtime_pm_get(dev_priv); | ||
2081 | } | ||
2082 | |||
2083 | /** | ||
2084 | * intel_aux_display_runtime_put - release an auxiliary power domain reference | ||
2085 | * @dev_priv: i915 device instance | ||
2086 | * | ||
2087 | * This function drops the auxiliary power domain reference obtained by | ||
2088 | * intel_aux_display_runtime_get() and might power down the corresponding | ||
2089 | * hardware block right away if this is the last reference. | ||
2090 | */ | ||
2091 | void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv) | ||
2092 | { | ||
2093 | intel_runtime_pm_put(dev_priv); | ||
2094 | } | ||
2095 | |||
2096 | /** | ||
2097 | * intel_runtime_pm_get - grab a runtime pm reference | 2071 | * intel_runtime_pm_get - grab a runtime pm reference |
2098 | * @dev_priv: i915 device instance | 2072 | * @dev_priv: i915 device instance |
2099 | * | 2073 | * |
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 64f16ea779ef..7b990b4e96d2 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c | |||
@@ -63,8 +63,7 @@ static void imx_drm_driver_lastclose(struct drm_device *drm) | |||
63 | #if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER) | 63 | #if IS_ENABLED(CONFIG_DRM_IMX_FB_HELPER) |
64 | struct imx_drm_device *imxdrm = drm->dev_private; | 64 | struct imx_drm_device *imxdrm = drm->dev_private; |
65 | 65 | ||
66 | if (imxdrm->fbhelper) | 66 | drm_fbdev_cma_restore_mode(imxdrm->fbhelper); |
67 | drm_fbdev_cma_restore_mode(imxdrm->fbhelper); | ||
68 | #endif | 67 | #endif |
69 | } | 68 | } |
70 | 69 | ||
@@ -340,7 +339,7 @@ err_kms: | |||
340 | * imx_drm_add_crtc - add a new crtc | 339 | * imx_drm_add_crtc - add a new crtc |
341 | */ | 340 | */ |
342 | int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, | 341 | int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, |
343 | struct imx_drm_crtc **new_crtc, | 342 | struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane, |
344 | const struct imx_drm_crtc_helper_funcs *imx_drm_helper_funcs, | 343 | const struct imx_drm_crtc_helper_funcs *imx_drm_helper_funcs, |
345 | struct device_node *port) | 344 | struct device_node *port) |
346 | { | 345 | { |
@@ -379,7 +378,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, | |||
379 | drm_crtc_helper_add(crtc, | 378 | drm_crtc_helper_add(crtc, |
380 | imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); | 379 | imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); |
381 | 380 | ||
382 | drm_crtc_init(drm, crtc, | 381 | drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL, |
383 | imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); | 382 | imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); |
384 | 383 | ||
385 | return 0; | 384 | return 0; |
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h index 28e776d8d9d2..83284b4d4be1 100644 --- a/drivers/gpu/drm/imx/imx-drm.h +++ b/drivers/gpu/drm/imx/imx-drm.h | |||
@@ -9,6 +9,7 @@ struct drm_display_mode; | |||
9 | struct drm_encoder; | 9 | struct drm_encoder; |
10 | struct drm_fbdev_cma; | 10 | struct drm_fbdev_cma; |
11 | struct drm_framebuffer; | 11 | struct drm_framebuffer; |
12 | struct drm_plane; | ||
12 | struct imx_drm_crtc; | 13 | struct imx_drm_crtc; |
13 | struct platform_device; | 14 | struct platform_device; |
14 | 15 | ||
@@ -24,7 +25,7 @@ struct imx_drm_crtc_helper_funcs { | |||
24 | }; | 25 | }; |
25 | 26 | ||
26 | int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, | 27 | int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, |
27 | struct imx_drm_crtc **new_crtc, | 28 | struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane, |
28 | const struct imx_drm_crtc_helper_funcs *imx_helper_funcs, | 29 | const struct imx_drm_crtc_helper_funcs *imx_helper_funcs, |
29 | struct device_node *port); | 30 | struct device_node *port); |
30 | int imx_drm_remove_crtc(struct imx_drm_crtc *); | 31 | int imx_drm_remove_crtc(struct imx_drm_crtc *); |
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index e671ad369416..f9597146dc67 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c | |||
@@ -721,6 +721,7 @@ static const struct of_device_id imx_tve_dt_ids[] = { | |||
721 | { .compatible = "fsl,imx53-tve", }, | 721 | { .compatible = "fsl,imx53-tve", }, |
722 | { /* sentinel */ } | 722 | { /* sentinel */ } |
723 | }; | 723 | }; |
724 | MODULE_DEVICE_TABLE(of, imx_tve_dt_ids); | ||
724 | 725 | ||
725 | static struct platform_driver imx_tve_driver = { | 726 | static struct platform_driver imx_tve_driver = { |
726 | .probe = imx_tve_probe, | 727 | .probe = imx_tve_probe, |
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 7bc8301fafff..4ab841eebee1 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c | |||
@@ -212,7 +212,8 @@ static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc) | |||
212 | 212 | ||
213 | spin_lock_irqsave(&drm->event_lock, flags); | 213 | spin_lock_irqsave(&drm->event_lock, flags); |
214 | if (ipu_crtc->page_flip_event) | 214 | if (ipu_crtc->page_flip_event) |
215 | drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event); | 215 | drm_crtc_send_vblank_event(&ipu_crtc->base, |
216 | ipu_crtc->page_flip_event); | ||
216 | ipu_crtc->page_flip_event = NULL; | 217 | ipu_crtc->page_flip_event = NULL; |
217 | imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); | 218 | imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); |
218 | spin_unlock_irqrestore(&drm->event_lock, flags); | 219 | spin_unlock_irqrestore(&drm->event_lock, flags); |
@@ -349,7 +350,6 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, | |||
349 | struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); | 350 | struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); |
350 | int dp = -EINVAL; | 351 | int dp = -EINVAL; |
351 | int ret; | 352 | int ret; |
352 | int id; | ||
353 | 353 | ||
354 | ret = ipu_get_resources(ipu_crtc, pdata); | 354 | ret = ipu_get_resources(ipu_crtc, pdata); |
355 | if (ret) { | 355 | if (ret) { |
@@ -358,18 +358,23 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, | |||
358 | return ret; | 358 | return ret; |
359 | } | 359 | } |
360 | 360 | ||
361 | if (pdata->dp >= 0) | ||
362 | dp = IPU_DP_FLOW_SYNC_BG; | ||
363 | ipu_crtc->plane[0] = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0, | ||
364 | DRM_PLANE_TYPE_PRIMARY); | ||
365 | if (IS_ERR(ipu_crtc->plane[0])) { | ||
366 | ret = PTR_ERR(ipu_crtc->plane[0]); | ||
367 | goto err_put_resources; | ||
368 | } | ||
369 | |||
361 | ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc, | 370 | ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc, |
362 | &ipu_crtc_helper_funcs, ipu_crtc->dev->of_node); | 371 | &ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs, |
372 | ipu_crtc->dev->of_node); | ||
363 | if (ret) { | 373 | if (ret) { |
364 | dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret); | 374 | dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret); |
365 | goto err_put_resources; | 375 | goto err_put_resources; |
366 | } | 376 | } |
367 | 377 | ||
368 | if (pdata->dp >= 0) | ||
369 | dp = IPU_DP_FLOW_SYNC_BG; | ||
370 | id = imx_drm_crtc_id(ipu_crtc->imx_crtc); | ||
371 | ipu_crtc->plane[0] = ipu_plane_init(ipu_crtc->base.dev, ipu, | ||
372 | pdata->dma[0], dp, BIT(id), true); | ||
373 | ret = ipu_plane_get_resources(ipu_crtc->plane[0]); | 378 | ret = ipu_plane_get_resources(ipu_crtc->plane[0]); |
374 | if (ret) { | 379 | if (ret) { |
375 | dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n", | 380 | dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n", |
@@ -379,10 +384,10 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, | |||
379 | 384 | ||
380 | /* If this crtc is using the DP, add an overlay plane */ | 385 | /* If this crtc is using the DP, add an overlay plane */ |
381 | if (pdata->dp >= 0 && pdata->dma[1] > 0) { | 386 | if (pdata->dp >= 0 && pdata->dma[1] > 0) { |
382 | ipu_crtc->plane[1] = ipu_plane_init(ipu_crtc->base.dev, ipu, | 387 | ipu_crtc->plane[1] = ipu_plane_init(drm, ipu, pdata->dma[1], |
383 | pdata->dma[1], | 388 | IPU_DP_FLOW_SYNC_FG, |
384 | IPU_DP_FLOW_SYNC_FG, | 389 | drm_crtc_mask(&ipu_crtc->base), |
385 | BIT(id), false); | 390 | DRM_PLANE_TYPE_OVERLAY); |
386 | if (IS_ERR(ipu_crtc->plane[1])) | 391 | if (IS_ERR(ipu_crtc->plane[1])) |
387 | ipu_crtc->plane[1] = NULL; | 392 | ipu_crtc->plane[1] = NULL; |
388 | } | 393 | } |
@@ -407,28 +412,6 @@ err_put_resources: | |||
407 | return ret; | 412 | return ret; |
408 | } | 413 | } |
409 | 414 | ||
410 | static struct device_node *ipu_drm_get_port_by_id(struct device_node *parent, | ||
411 | int port_id) | ||
412 | { | ||
413 | struct device_node *port; | ||
414 | int id, ret; | ||
415 | |||
416 | port = of_get_child_by_name(parent, "port"); | ||
417 | while (port) { | ||
418 | ret = of_property_read_u32(port, "reg", &id); | ||
419 | if (!ret && id == port_id) | ||
420 | return port; | ||
421 | |||
422 | do { | ||
423 | port = of_get_next_child(parent, port); | ||
424 | if (!port) | ||
425 | return NULL; | ||
426 | } while (of_node_cmp(port->name, "port")); | ||
427 | } | ||
428 | |||
429 | return NULL; | ||
430 | } | ||
431 | |||
432 | static int ipu_drm_bind(struct device *dev, struct device *master, void *data) | 415 | static int ipu_drm_bind(struct device *dev, struct device *master, void *data) |
433 | { | 416 | { |
434 | struct ipu_client_platformdata *pdata = dev->platform_data; | 417 | struct ipu_client_platformdata *pdata = dev->platform_data; |
@@ -470,23 +453,11 @@ static const struct component_ops ipu_crtc_ops = { | |||
470 | static int ipu_drm_probe(struct platform_device *pdev) | 453 | static int ipu_drm_probe(struct platform_device *pdev) |
471 | { | 454 | { |
472 | struct device *dev = &pdev->dev; | 455 | struct device *dev = &pdev->dev; |
473 | struct ipu_client_platformdata *pdata = dev->platform_data; | ||
474 | int ret; | 456 | int ret; |
475 | 457 | ||
476 | if (!dev->platform_data) | 458 | if (!dev->platform_data) |
477 | return -EINVAL; | 459 | return -EINVAL; |
478 | 460 | ||
479 | if (!dev->of_node) { | ||
480 | /* Associate crtc device with the corresponding DI port node */ | ||
481 | dev->of_node = ipu_drm_get_port_by_id(dev->parent->of_node, | ||
482 | pdata->di + 2); | ||
483 | if (!dev->of_node) { | ||
484 | dev_err(dev, "missing port@%d node in %s\n", | ||
485 | pdata->di + 2, dev->parent->of_node->full_name); | ||
486 | return -ENODEV; | ||
487 | } | ||
488 | } | ||
489 | |||
490 | ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); | 461 | ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); |
491 | if (ret) | 462 | if (ret) |
492 | return ret; | 463 | return ret; |
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 575f4c84388f..e2ff410bab74 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c | |||
@@ -381,7 +381,7 @@ static struct drm_plane_funcs ipu_plane_funcs = { | |||
381 | 381 | ||
382 | struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, | 382 | struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, |
383 | int dma, int dp, unsigned int possible_crtcs, | 383 | int dma, int dp, unsigned int possible_crtcs, |
384 | bool priv) | 384 | enum drm_plane_type type) |
385 | { | 385 | { |
386 | struct ipu_plane *ipu_plane; | 386 | struct ipu_plane *ipu_plane; |
387 | int ret; | 387 | int ret; |
@@ -399,10 +399,9 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, | |||
399 | ipu_plane->dma = dma; | 399 | ipu_plane->dma = dma; |
400 | ipu_plane->dp_flow = dp; | 400 | ipu_plane->dp_flow = dp; |
401 | 401 | ||
402 | ret = drm_plane_init(dev, &ipu_plane->base, possible_crtcs, | 402 | ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs, |
403 | &ipu_plane_funcs, ipu_plane_formats, | 403 | &ipu_plane_funcs, ipu_plane_formats, |
404 | ARRAY_SIZE(ipu_plane_formats), | 404 | ARRAY_SIZE(ipu_plane_formats), type); |
405 | priv); | ||
406 | if (ret) { | 405 | if (ret) { |
407 | DRM_ERROR("failed to initialize plane\n"); | 406 | DRM_ERROR("failed to initialize plane\n"); |
408 | kfree(ipu_plane); | 407 | kfree(ipu_plane); |
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h index 9b5eff18f5b8..3a443b413c60 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.h +++ b/drivers/gpu/drm/imx/ipuv3-plane.h | |||
@@ -34,7 +34,7 @@ struct ipu_plane { | |||
34 | 34 | ||
35 | struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, | 35 | struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, |
36 | int dma, int dp, unsigned int possible_crtcs, | 36 | int dma, int dp, unsigned int possible_crtcs, |
37 | bool priv); | 37 | enum drm_plane_type type); |
38 | 38 | ||
39 | /* Init IDMAC, DMFC, DP */ | 39 | /* Init IDMAC, DMFC, DP */ |
40 | int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc, | 40 | int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc, |
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index b4deb9cf9d71..2e9b9f1b5cd2 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c | |||
@@ -54,7 +54,11 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) | |||
54 | 54 | ||
55 | if (imxpd->panel && imxpd->panel->funcs && | 55 | if (imxpd->panel && imxpd->panel->funcs && |
56 | imxpd->panel->funcs->get_modes) { | 56 | imxpd->panel->funcs->get_modes) { |
57 | struct drm_display_info *di = &connector->display_info; | ||
58 | |||
57 | num_modes = imxpd->panel->funcs->get_modes(imxpd->panel); | 59 | num_modes = imxpd->panel->funcs->get_modes(imxpd->panel); |
60 | if (!imxpd->bus_format && di->num_bus_formats) | ||
61 | imxpd->bus_format = di->bus_formats[0]; | ||
58 | if (num_modes > 0) | 62 | if (num_modes > 0) |
59 | return num_modes; | 63 | return num_modes; |
60 | } | 64 | } |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h index 8f760002e401..913192c94876 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h | |||
@@ -159,7 +159,6 @@ struct nvkm_device_func { | |||
159 | struct nvkm_device_quirk { | 159 | struct nvkm_device_quirk { |
160 | u8 tv_pin_mask; | 160 | u8 tv_pin_mask; |
161 | u8 tv_gpio; | 161 | u8 tv_gpio; |
162 | bool War00C800_0; | ||
163 | }; | 162 | }; |
164 | 163 | ||
165 | struct nvkm_device_chip { | 164 | struct nvkm_device_chip { |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h index 28bc202f9753..40f845e31272 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h | |||
@@ -7,6 +7,7 @@ struct nvkm_instmem { | |||
7 | const struct nvkm_instmem_func *func; | 7 | const struct nvkm_instmem_func *func; |
8 | struct nvkm_subdev subdev; | 8 | struct nvkm_subdev subdev; |
9 | 9 | ||
10 | spinlock_t lock; | ||
10 | struct list_head list; | 11 | struct list_head list; |
11 | u32 reserved; | 12 | u32 reserved; |
12 | 13 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 8b8332e46f24..d5e6938cc6bc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c | |||
@@ -367,6 +367,7 @@ static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios, | |||
367 | return -ENODEV; | 367 | return -ENODEV; |
368 | } | 368 | } |
369 | obj = (union acpi_object *)buffer.pointer; | 369 | obj = (union acpi_object *)buffer.pointer; |
370 | len = min(len, (int)obj->buffer.length); | ||
370 | memcpy(bios+offset, obj->buffer.pointer, len); | 371 | memcpy(bios+offset, obj->buffer.pointer, len); |
371 | kfree(buffer.pointer); | 372 | kfree(buffer.pointer); |
372 | return len; | 373 | return len; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index db6bc6760545..64c8d932d5f1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
@@ -829,7 +829,6 @@ nouveau_finish_page_flip(struct nouveau_channel *chan, | |||
829 | struct drm_device *dev = drm->dev; | 829 | struct drm_device *dev = drm->dev; |
830 | struct nouveau_page_flip_state *s; | 830 | struct nouveau_page_flip_state *s; |
831 | unsigned long flags; | 831 | unsigned long flags; |
832 | int crtcid = -1; | ||
833 | 832 | ||
834 | spin_lock_irqsave(&dev->event_lock, flags); | 833 | spin_lock_irqsave(&dev->event_lock, flags); |
835 | 834 | ||
@@ -841,15 +840,19 @@ nouveau_finish_page_flip(struct nouveau_channel *chan, | |||
841 | 840 | ||
842 | s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); | 841 | s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); |
843 | if (s->event) { | 842 | if (s->event) { |
844 | /* Vblank timestamps/counts are only correct on >= NV-50 */ | 843 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { |
845 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) | 844 | drm_arm_vblank_event(dev, s->crtc, s->event); |
846 | crtcid = s->crtc; | 845 | } else { |
846 | drm_send_vblank_event(dev, s->crtc, s->event); | ||
847 | 847 | ||
848 | drm_send_vblank_event(dev, crtcid, s->event); | 848 | /* Give up ownership of vblank for page-flipped crtc */ |
849 | drm_vblank_put(dev, s->crtc); | ||
850 | } | ||
851 | } | ||
852 | else { | ||
853 | /* Give up ownership of vblank for page-flipped crtc */ | ||
854 | drm_vblank_put(dev, s->crtc); | ||
849 | } | 855 | } |
850 | |||
851 | /* Give up ownership of vblank for page-flipped crtc */ | ||
852 | drm_vblank_put(dev, s->crtc); | ||
853 | 856 | ||
854 | list_del(&s->head); | 857 | list_del(&s->head); |
855 | if (ps) | 858 | if (ps) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h index 3050042e6c6d..a02813e994ec 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h | |||
@@ -39,6 +39,7 @@ | |||
39 | 39 | ||
40 | #include <nvif/client.h> | 40 | #include <nvif/client.h> |
41 | #include <nvif/device.h> | 41 | #include <nvif/device.h> |
42 | #include <nvif/ioctl.h> | ||
42 | 43 | ||
43 | #include <drmP.h> | 44 | #include <drmP.h> |
44 | 45 | ||
@@ -65,9 +66,10 @@ struct nouveau_drm_tile { | |||
65 | }; | 66 | }; |
66 | 67 | ||
67 | enum nouveau_drm_object_route { | 68 | enum nouveau_drm_object_route { |
68 | NVDRM_OBJECT_NVIF = 0, | 69 | NVDRM_OBJECT_NVIF = NVIF_IOCTL_V0_OWNER_NVIF, |
69 | NVDRM_OBJECT_USIF, | 70 | NVDRM_OBJECT_USIF, |
70 | NVDRM_OBJECT_ABI16, | 71 | NVDRM_OBJECT_ABI16, |
72 | NVDRM_OBJECT_ANY = NVIF_IOCTL_V0_OWNER_ANY, | ||
71 | }; | 73 | }; |
72 | 74 | ||
73 | enum nouveau_drm_notify_route { | 75 | enum nouveau_drm_notify_route { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c index 89dc4ce63490..6ae1b3494bcd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_usif.c +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c | |||
@@ -313,7 +313,10 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) | |||
313 | if (nvif_unpack(argv->v0, 0, 0, true)) { | 313 | if (nvif_unpack(argv->v0, 0, 0, true)) { |
314 | /* block access to objects not created via this interface */ | 314 | /* block access to objects not created via this interface */ |
315 | owner = argv->v0.owner; | 315 | owner = argv->v0.owner; |
316 | argv->v0.owner = NVDRM_OBJECT_USIF; | 316 | if (argv->v0.object == 0ULL) |
317 | argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */ | ||
318 | else | ||
319 | argv->v0.owner = NVDRM_OBJECT_USIF; | ||
317 | } else | 320 | } else |
318 | goto done; | 321 | goto done; |
319 | 322 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c index e3c783d0e2ab..62ad0300cfa5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c | |||
@@ -259,12 +259,6 @@ nvkm_device_pci_10de_0df4[] = { | |||
259 | }; | 259 | }; |
260 | 260 | ||
261 | static const struct nvkm_device_pci_vendor | 261 | static const struct nvkm_device_pci_vendor |
262 | nvkm_device_pci_10de_0fcd[] = { | ||
263 | { 0x17aa, 0x3801, NULL, { .War00C800_0 = true } }, /* Lenovo Y510P */ | ||
264 | {} | ||
265 | }; | ||
266 | |||
267 | static const struct nvkm_device_pci_vendor | ||
268 | nvkm_device_pci_10de_0fd2[] = { | 262 | nvkm_device_pci_10de_0fd2[] = { |
269 | { 0x1028, 0x0595, "GeForce GT 640M LE" }, | 263 | { 0x1028, 0x0595, "GeForce GT 640M LE" }, |
270 | { 0x1028, 0x05b2, "GeForce GT 640M LE" }, | 264 | { 0x1028, 0x05b2, "GeForce GT 640M LE" }, |
@@ -684,7 +678,6 @@ nvkm_device_pci_10de_1189[] = { | |||
684 | static const struct nvkm_device_pci_vendor | 678 | static const struct nvkm_device_pci_vendor |
685 | nvkm_device_pci_10de_1199[] = { | 679 | nvkm_device_pci_10de_1199[] = { |
686 | { 0x1458, 0xd001, "GeForce GTX 760" }, | 680 | { 0x1458, 0xd001, "GeForce GTX 760" }, |
687 | { 0x1462, 0x1106, "GeForce GTX 780M", { .War00C800_0 = true } }, /* Medion Erazer X7827 */ | ||
688 | {} | 681 | {} |
689 | }; | 682 | }; |
690 | 683 | ||
@@ -695,14 +688,6 @@ nvkm_device_pci_10de_11e3[] = { | |||
695 | }; | 688 | }; |
696 | 689 | ||
697 | static const struct nvkm_device_pci_vendor | 690 | static const struct nvkm_device_pci_vendor |
698 | nvkm_device_pci_10de_11fc[] = { | ||
699 | { 0x1179, 0x0001, NULL, { .War00C800_0 = true } }, /* Toshiba Tecra W50 */ | ||
700 | { 0x17aa, 0x2211, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */ | ||
701 | { 0x17aa, 0x221e, NULL, { .War00C800_0 = true } }, /* Lenovo W541 */ | ||
702 | {} | ||
703 | }; | ||
704 | |||
705 | static const struct nvkm_device_pci_vendor | ||
706 | nvkm_device_pci_10de_1247[] = { | 691 | nvkm_device_pci_10de_1247[] = { |
707 | { 0x1043, 0x212a, "GeForce GT 635M" }, | 692 | { 0x1043, 0x212a, "GeForce GT 635M" }, |
708 | { 0x1043, 0x212b, "GeForce GT 635M" }, | 693 | { 0x1043, 0x212b, "GeForce GT 635M" }, |
@@ -1356,7 +1341,7 @@ nvkm_device_pci_10de[] = { | |||
1356 | { 0x0fc6, "GeForce GTX 650" }, | 1341 | { 0x0fc6, "GeForce GTX 650" }, |
1357 | { 0x0fc8, "GeForce GT 740" }, | 1342 | { 0x0fc8, "GeForce GT 740" }, |
1358 | { 0x0fc9, "GeForce GT 730" }, | 1343 | { 0x0fc9, "GeForce GT 730" }, |
1359 | { 0x0fcd, "GeForce GT 755M", nvkm_device_pci_10de_0fcd }, | 1344 | { 0x0fcd, "GeForce GT 755M" }, |
1360 | { 0x0fce, "GeForce GT 640M LE" }, | 1345 | { 0x0fce, "GeForce GT 640M LE" }, |
1361 | { 0x0fd1, "GeForce GT 650M" }, | 1346 | { 0x0fd1, "GeForce GT 650M" }, |
1362 | { 0x0fd2, "GeForce GT 640M", nvkm_device_pci_10de_0fd2 }, | 1347 | { 0x0fd2, "GeForce GT 640M", nvkm_device_pci_10de_0fd2 }, |
@@ -1490,7 +1475,7 @@ nvkm_device_pci_10de[] = { | |||
1490 | { 0x11e2, "GeForce GTX 765M" }, | 1475 | { 0x11e2, "GeForce GTX 765M" }, |
1491 | { 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 }, | 1476 | { 0x11e3, "GeForce GTX 760M", nvkm_device_pci_10de_11e3 }, |
1492 | { 0x11fa, "Quadro K4000" }, | 1477 | { 0x11fa, "Quadro K4000" }, |
1493 | { 0x11fc, "Quadro K2100M", nvkm_device_pci_10de_11fc }, | 1478 | { 0x11fc, "Quadro K2100M" }, |
1494 | { 0x1200, "GeForce GTX 560 Ti" }, | 1479 | { 0x1200, "GeForce GTX 560 Ti" }, |
1495 | { 0x1201, "GeForce GTX 560" }, | 1480 | { 0x1201, "GeForce GTX 560" }, |
1496 | { 0x1203, "GeForce GTX 460 SE v2" }, | 1481 | { 0x1203, "GeForce GTX 460 SE v2" }, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c index b5b875928aba..74de7a96c22a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c | |||
@@ -207,6 +207,8 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info) | |||
207 | const u32 b = beta * gr->ppc_tpc_nr[gpc][ppc]; | 207 | const u32 b = beta * gr->ppc_tpc_nr[gpc][ppc]; |
208 | const u32 t = timeslice_mode; | 208 | const u32 t = timeslice_mode; |
209 | const u32 o = PPC_UNIT(gpc, ppc, 0); | 209 | const u32 o = PPC_UNIT(gpc, ppc, 0); |
210 | if (!(gr->ppc_mask[gpc] & (1 << ppc))) | ||
211 | continue; | ||
210 | mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo); | 212 | mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo); |
211 | mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo); | 213 | mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo); |
212 | bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc]; | 214 | bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc]; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc index 194afe910d21..7dacb3cc0668 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpc.fuc | |||
@@ -52,10 +52,12 @@ mmio_list_base: | |||
52 | #endif | 52 | #endif |
53 | 53 | ||
54 | #ifdef INCLUDE_CODE | 54 | #ifdef INCLUDE_CODE |
55 | #define gpc_addr(reg,addr) /* | ||
56 | */ imm32(reg,addr) /* | ||
57 | */ or reg NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE | ||
55 | #define gpc_wr32(addr,reg) /* | 58 | #define gpc_wr32(addr,reg) /* |
59 | */ gpc_addr($r14,addr) /* | ||
56 | */ mov b32 $r15 reg /* | 60 | */ mov b32 $r15 reg /* |
57 | */ imm32($r14, addr) /* | ||
58 | */ or $r14 NV_PGRAPH_GPCX_GPCCS_MMIO_CTRL_BASE_ENABLE /* | ||
59 | */ call(nv_wr32) | 61 | */ call(nv_wr32) |
60 | 62 | ||
61 | // reports an exception to the host | 63 | // reports an exception to the host |
@@ -161,7 +163,7 @@ init: | |||
161 | 163 | ||
162 | #if NV_PGRAPH_GPCX_UNK__SIZE > 0 | 164 | #if NV_PGRAPH_GPCX_UNK__SIZE > 0 |
163 | // figure out which, and how many, UNKs are actually present | 165 | // figure out which, and how many, UNKs are actually present |
164 | imm32($r14, 0x500c30) | 166 | gpc_addr($r14, 0x500c30) |
165 | clear b32 $r2 | 167 | clear b32 $r2 |
166 | clear b32 $r3 | 168 | clear b32 $r3 |
167 | clear b32 $r4 | 169 | clear b32 $r4 |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h index 64d07df4b8b1..bb820ff28621 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h | |||
@@ -314,7 +314,7 @@ uint32_t gf117_grgpc_code[] = { | |||
314 | 0x03f01200, | 314 | 0x03f01200, |
315 | 0x0002d000, | 315 | 0x0002d000, |
316 | 0x17f104bd, | 316 | 0x17f104bd, |
317 | 0x10fe0542, | 317 | 0x10fe0545, |
318 | 0x0007f100, | 318 | 0x0007f100, |
319 | 0x0003f007, | 319 | 0x0003f007, |
320 | 0xbd0000d0, | 320 | 0xbd0000d0, |
@@ -338,184 +338,184 @@ uint32_t gf117_grgpc_code[] = { | |||
338 | 0x02d00103, | 338 | 0x02d00103, |
339 | 0xf104bd00, | 339 | 0xf104bd00, |
340 | 0xf00c30e7, | 340 | 0xf00c30e7, |
341 | 0x24bd50e3, | 341 | 0xe5f050e3, |
342 | 0x44bd34bd, | 342 | 0xbd24bd01, |
343 | /* 0x0430: init_unk_loop */ | 343 | /* 0x0433: init_unk_loop */ |
344 | 0xb06821f4, | 344 | 0xf444bd34, |
345 | 0x0bf400f6, | 345 | 0xf6b06821, |
346 | 0x01f7f00f, | 346 | 0x0f0bf400, |
347 | 0xfd04f2bb, | 347 | 0xbb01f7f0, |
348 | 0x30b6054f, | 348 | 0x4ffd04f2, |
349 | /* 0x0445: init_unk_next */ | 349 | 0x0130b605, |
350 | 0x0120b601, | 350 | /* 0x0448: init_unk_next */ |
351 | 0xb004e0b6, | 351 | 0xb60120b6, |
352 | 0x1bf40126, | 352 | 0x26b004e0, |
353 | /* 0x0451: init_unk_done */ | 353 | 0xe21bf401, |
354 | 0x070380e2, | 354 | /* 0x0454: init_unk_done */ |
355 | 0xf1080480, | 355 | 0x80070380, |
356 | 0xf0010027, | 356 | 0x27f10804, |
357 | 0x22cf0223, | 357 | 0x23f00100, |
358 | 0x9534bd00, | 358 | 0x0022cf02, |
359 | 0x07f10825, | 359 | 0x259534bd, |
360 | 0x03f0c000, | 360 | 0x0007f108, |
361 | 0x0005d001, | 361 | 0x0103f0c0, |
362 | 0x07f104bd, | 362 | 0xbd0005d0, |
363 | 0x03f0c100, | 363 | 0x0007f104, |
364 | 0x0005d001, | 364 | 0x0103f0c1, |
365 | 0x0e9804bd, | 365 | 0xbd0005d0, |
366 | 0x010f9800, | 366 | 0x000e9804, |
367 | 0x015021f5, | 367 | 0xf5010f98, |
368 | 0xbb002fbb, | 368 | 0xbb015021, |
369 | 0x0e98003f, | 369 | 0x3fbb002f, |
370 | 0x020f9801, | 370 | 0x010e9800, |
371 | 0x015021f5, | 371 | 0xf5020f98, |
372 | 0xfd050e98, | 372 | 0x98015021, |
373 | 0x2ebb00ef, | 373 | 0xeffd050e, |
374 | 0x003ebb00, | 374 | 0x002ebb00, |
375 | 0x98020e98, | 375 | 0x98003ebb, |
376 | 0x21f5030f, | 376 | 0x0f98020e, |
377 | 0x0e980150, | 377 | 0x5021f503, |
378 | 0x00effd07, | 378 | 0x070e9801, |
379 | 0xbb002ebb, | 379 | 0xbb00effd, |
380 | 0x35b6003e, | 380 | 0x3ebb002e, |
381 | 0x0007f102, | 381 | 0x0235b600, |
382 | 0x0103f0d3, | 382 | 0xd30007f1, |
383 | 0xbd0003d0, | 383 | 0xd00103f0, |
384 | 0x0825b604, | ||
385 | 0xb60635b6, | ||
386 | 0x30b60120, | ||
387 | 0x0824b601, | ||
388 | 0xb90834b6, | ||
389 | 0x21f5022f, | ||
390 | 0x2fbb02d3, | ||
391 | 0x003fbb00, | ||
392 | 0x010007f1, | ||
393 | 0xd00203f0, | ||
394 | 0x04bd0003, | 384 | 0x04bd0003, |
395 | 0x29f024bd, | 385 | 0xb60825b6, |
396 | 0x0007f11f, | 386 | 0x20b60635, |
397 | 0x0203f008, | 387 | 0x0130b601, |
398 | 0xbd0002d0, | 388 | 0xb60824b6, |
399 | /* 0x0505: main */ | 389 | 0x2fb90834, |
400 | 0x0031f404, | 390 | 0xd321f502, |
401 | 0xf00028f4, | 391 | 0x002fbb02, |
402 | 0x21f424d7, | 392 | 0xf1003fbb, |
403 | 0xf401f439, | 393 | 0xf0010007, |
404 | 0xf404e4b0, | 394 | 0x03d00203, |
405 | 0x81fe1e18, | 395 | 0xbd04bd00, |
406 | 0x0627f001, | 396 | 0x1f29f024, |
407 | 0x12fd20bd, | 397 | 0x080007f1, |
408 | 0x01e4b604, | 398 | 0xd00203f0, |
409 | 0xfe051efd, | 399 | 0x04bd0002, |
410 | 0x21f50018, | 400 | /* 0x0508: main */ |
411 | 0x0ef405fa, | 401 | 0xf40031f4, |
412 | /* 0x0535: main_not_ctx_xfer */ | 402 | 0xd7f00028, |
413 | 0x10ef94d3, | 403 | 0x3921f424, |
414 | 0xf501f5f0, | 404 | 0xb0f401f4, |
415 | 0xf4037e21, | 405 | 0x18f404e4, |
416 | /* 0x0542: ih */ | 406 | 0x0181fe1e, |
417 | 0x80f9c60e, | 407 | 0xbd0627f0, |
418 | 0xf90188fe, | 408 | 0x0412fd20, |
419 | 0xf990f980, | 409 | 0xfd01e4b6, |
420 | 0xf9b0f9a0, | 410 | 0x18fe051e, |
421 | 0xf9e0f9d0, | 411 | 0xfd21f500, |
422 | 0xf104bdf0, | 412 | 0xd30ef405, |
423 | 0xf00200a7, | 413 | /* 0x0538: main_not_ctx_xfer */ |
424 | 0xaacf00a3, | 414 | 0xf010ef94, |
425 | 0x04abc400, | 415 | 0x21f501f5, |
426 | 0xf02c0bf4, | 416 | 0x0ef4037e, |
427 | 0xe7f124d7, | 417 | /* 0x0545: ih */ |
428 | 0xe3f01a00, | 418 | 0xfe80f9c6, |
429 | 0x00eecf00, | 419 | 0x80f90188, |
430 | 0x1900f7f1, | 420 | 0xa0f990f9, |
431 | 0xcf00f3f0, | 421 | 0xd0f9b0f9, |
432 | 0x21f400ff, | 422 | 0xf0f9e0f9, |
433 | 0x01e7f004, | 423 | 0xa7f104bd, |
434 | 0x1d0007f1, | 424 | 0xa3f00200, |
435 | 0xd00003f0, | 425 | 0x00aacf00, |
436 | 0x04bd000e, | 426 | 0xf404abc4, |
437 | /* 0x0590: ih_no_fifo */ | 427 | 0xd7f02c0b, |
438 | 0x010007f1, | 428 | 0x00e7f124, |
439 | 0xd00003f0, | 429 | 0x00e3f01a, |
440 | 0x04bd000a, | 430 | 0xf100eecf, |
441 | 0xe0fcf0fc, | 431 | 0xf01900f7, |
442 | 0xb0fcd0fc, | 432 | 0xffcf00f3, |
443 | 0x90fca0fc, | 433 | 0x0421f400, |
444 | 0x88fe80fc, | 434 | 0xf101e7f0, |
445 | 0xf480fc00, | 435 | 0xf01d0007, |
446 | 0x01f80032, | 436 | 0x0ed00003, |
447 | /* 0x05b4: hub_barrier_done */ | 437 | /* 0x0593: ih_no_fifo */ |
448 | 0x9801f7f0, | 438 | 0xf104bd00, |
449 | 0xfebb040e, | 439 | 0xf0010007, |
450 | 0x02ffb904, | 440 | 0x0ad00003, |
451 | 0x9418e7f1, | 441 | 0xfc04bd00, |
452 | 0xf440e3f0, | 442 | 0xfce0fcf0, |
453 | 0x00f89d21, | 443 | 0xfcb0fcd0, |
454 | /* 0x05cc: ctx_redswitch */ | 444 | 0xfc90fca0, |
455 | 0xf120f7f0, | 445 | 0x0088fe80, |
446 | 0x32f480fc, | ||
447 | /* 0x05b7: hub_barrier_done */ | ||
448 | 0xf001f800, | ||
449 | 0x0e9801f7, | ||
450 | 0x04febb04, | ||
451 | 0xf102ffb9, | ||
452 | 0xf09418e7, | ||
453 | 0x21f440e3, | ||
454 | /* 0x05cf: ctx_redswitch */ | ||
455 | 0xf000f89d, | ||
456 | 0x07f120f7, | ||
457 | 0x03f08500, | ||
458 | 0x000fd001, | ||
459 | 0xe7f004bd, | ||
460 | /* 0x05e1: ctx_redswitch_delay */ | ||
461 | 0x01e2b608, | ||
462 | 0xf1fd1bf4, | ||
463 | 0xf10800f5, | ||
464 | 0xf10200f5, | ||
456 | 0xf0850007, | 465 | 0xf0850007, |
457 | 0x0fd00103, | 466 | 0x0fd00103, |
458 | 0xf004bd00, | 467 | 0xf804bd00, |
459 | /* 0x05de: ctx_redswitch_delay */ | 468 | /* 0x05fd: ctx_xfer */ |
460 | 0xe2b608e7, | 469 | 0x0007f100, |
461 | 0xfd1bf401, | 470 | 0x0203f081, |
462 | 0x0800f5f1, | 471 | 0xbd000fd0, |
463 | 0x0200f5f1, | 472 | 0x0711f404, |
464 | 0x850007f1, | 473 | 0x05cf21f5, |
465 | 0xd00103f0, | 474 | /* 0x0610: ctx_xfer_not_load */ |
466 | 0x04bd000f, | 475 | 0x026a21f5, |
467 | /* 0x05fa: ctx_xfer */ | 476 | 0x07f124bd, |
468 | 0x07f100f8, | 477 | 0x03f047fc, |
469 | 0x03f08100, | 478 | 0x0002d002, |
470 | 0x000fd002, | 479 | 0x2cf004bd, |
471 | 0x11f404bd, | 480 | 0x0320b601, |
472 | 0xcc21f507, | 481 | 0x4afc07f1, |
473 | /* 0x060d: ctx_xfer_not_load */ | 482 | 0xd00203f0, |
474 | 0x6a21f505, | 483 | 0x04bd0002, |
475 | 0xf124bd02, | ||
476 | 0xf047fc07, | ||
477 | 0x02d00203, | ||
478 | 0xf004bd00, | ||
479 | 0x20b6012c, | ||
480 | 0xfc07f103, | ||
481 | 0x0203f04a, | ||
482 | 0xbd0002d0, | ||
483 | 0x01acf004, | ||
484 | 0xf102a5f0, | ||
485 | 0xf00000b7, | ||
486 | 0x0c9850b3, | ||
487 | 0x0fc4b604, | ||
488 | 0x9800bcbb, | ||
489 | 0x0d98000c, | ||
490 | 0x00e7f001, | ||
491 | 0x016f21f5, | ||
492 | 0xf101acf0, | ||
493 | 0xf04000b7, | ||
494 | 0x0c9850b3, | ||
495 | 0x0fc4b604, | ||
496 | 0x9800bcbb, | ||
497 | 0x0d98010c, | ||
498 | 0x060f9802, | ||
499 | 0x0800e7f1, | ||
500 | 0x016f21f5, | ||
501 | 0xf001acf0, | 484 | 0xf001acf0, |
502 | 0xb7f104a5, | 485 | 0xb7f102a5, |
503 | 0xb3f03000, | 486 | 0xb3f00000, |
504 | 0x040c9850, | 487 | 0x040c9850, |
505 | 0xbb0fc4b6, | 488 | 0xbb0fc4b6, |
506 | 0x0c9800bc, | 489 | 0x0c9800bc, |
507 | 0x030d9802, | 490 | 0x010d9800, |
508 | 0xf1080f98, | 491 | 0xf500e7f0, |
509 | 0xf50200e7, | 492 | 0xf0016f21, |
510 | 0xf5016f21, | 493 | 0xb7f101ac, |
511 | 0xf4025e21, | 494 | 0xb3f04000, |
512 | 0x12f40601, | 495 | 0x040c9850, |
513 | /* 0x06a9: ctx_xfer_post */ | 496 | 0xbb0fc4b6, |
514 | 0x7f21f507, | 497 | 0x0c9800bc, |
515 | /* 0x06ad: ctx_xfer_done */ | 498 | 0x020d9801, |
516 | 0xb421f502, | 499 | 0xf1060f98, |
517 | 0x0000f805, | 500 | 0xf50800e7, |
518 | 0x00000000, | 501 | 0xf0016f21, |
502 | 0xa5f001ac, | ||
503 | 0x00b7f104, | ||
504 | 0x50b3f030, | ||
505 | 0xb6040c98, | ||
506 | 0xbcbb0fc4, | ||
507 | 0x020c9800, | ||
508 | 0x98030d98, | ||
509 | 0xe7f1080f, | ||
510 | 0x21f50200, | ||
511 | 0x21f5016f, | ||
512 | 0x01f4025e, | ||
513 | 0x0712f406, | ||
514 | /* 0x06ac: ctx_xfer_post */ | ||
515 | 0x027f21f5, | ||
516 | /* 0x06b0: ctx_xfer_done */ | ||
517 | 0x05b721f5, | ||
518 | 0x000000f8, | ||
519 | 0x00000000, | 519 | 0x00000000, |
520 | 0x00000000, | 520 | 0x00000000, |
521 | 0x00000000, | 521 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h index 2f596433c222..911976d20940 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h | |||
@@ -314,7 +314,7 @@ uint32_t gk104_grgpc_code[] = { | |||
314 | 0x03f01200, | 314 | 0x03f01200, |
315 | 0x0002d000, | 315 | 0x0002d000, |
316 | 0x17f104bd, | 316 | 0x17f104bd, |
317 | 0x10fe0542, | 317 | 0x10fe0545, |
318 | 0x0007f100, | 318 | 0x0007f100, |
319 | 0x0003f007, | 319 | 0x0003f007, |
320 | 0xbd0000d0, | 320 | 0xbd0000d0, |
@@ -338,184 +338,184 @@ uint32_t gk104_grgpc_code[] = { | |||
338 | 0x02d00103, | 338 | 0x02d00103, |
339 | 0xf104bd00, | 339 | 0xf104bd00, |
340 | 0xf00c30e7, | 340 | 0xf00c30e7, |
341 | 0x24bd50e3, | 341 | 0xe5f050e3, |
342 | 0x44bd34bd, | 342 | 0xbd24bd01, |
343 | /* 0x0430: init_unk_loop */ | 343 | /* 0x0433: init_unk_loop */ |
344 | 0xb06821f4, | 344 | 0xf444bd34, |
345 | 0x0bf400f6, | 345 | 0xf6b06821, |
346 | 0x01f7f00f, | 346 | 0x0f0bf400, |
347 | 0xfd04f2bb, | 347 | 0xbb01f7f0, |
348 | 0x30b6054f, | 348 | 0x4ffd04f2, |
349 | /* 0x0445: init_unk_next */ | 349 | 0x0130b605, |
350 | 0x0120b601, | 350 | /* 0x0448: init_unk_next */ |
351 | 0xb004e0b6, | 351 | 0xb60120b6, |
352 | 0x1bf40126, | 352 | 0x26b004e0, |
353 | /* 0x0451: init_unk_done */ | 353 | 0xe21bf401, |
354 | 0x070380e2, | 354 | /* 0x0454: init_unk_done */ |
355 | 0xf1080480, | 355 | 0x80070380, |
356 | 0xf0010027, | 356 | 0x27f10804, |
357 | 0x22cf0223, | 357 | 0x23f00100, |
358 | 0x9534bd00, | 358 | 0x0022cf02, |
359 | 0x07f10825, | 359 | 0x259534bd, |
360 | 0x03f0c000, | 360 | 0x0007f108, |
361 | 0x0005d001, | 361 | 0x0103f0c0, |
362 | 0x07f104bd, | 362 | 0xbd0005d0, |
363 | 0x03f0c100, | 363 | 0x0007f104, |
364 | 0x0005d001, | 364 | 0x0103f0c1, |
365 | 0x0e9804bd, | 365 | 0xbd0005d0, |
366 | 0x010f9800, | 366 | 0x000e9804, |
367 | 0x015021f5, | 367 | 0xf5010f98, |
368 | 0xbb002fbb, | 368 | 0xbb015021, |
369 | 0x0e98003f, | 369 | 0x3fbb002f, |
370 | 0x020f9801, | 370 | 0x010e9800, |
371 | 0x015021f5, | 371 | 0xf5020f98, |
372 | 0xfd050e98, | 372 | 0x98015021, |
373 | 0x2ebb00ef, | 373 | 0xeffd050e, |
374 | 0x003ebb00, | 374 | 0x002ebb00, |
375 | 0x98020e98, | 375 | 0x98003ebb, |
376 | 0x21f5030f, | 376 | 0x0f98020e, |
377 | 0x0e980150, | 377 | 0x5021f503, |
378 | 0x00effd07, | 378 | 0x070e9801, |
379 | 0xbb002ebb, | 379 | 0xbb00effd, |
380 | 0x35b6003e, | 380 | 0x3ebb002e, |
381 | 0x0007f102, | 381 | 0x0235b600, |
382 | 0x0103f0d3, | 382 | 0xd30007f1, |
383 | 0xbd0003d0, | 383 | 0xd00103f0, |
384 | 0x0825b604, | ||
385 | 0xb60635b6, | ||
386 | 0x30b60120, | ||
387 | 0x0824b601, | ||
388 | 0xb90834b6, | ||
389 | 0x21f5022f, | ||
390 | 0x2fbb02d3, | ||
391 | 0x003fbb00, | ||
392 | 0x010007f1, | ||
393 | 0xd00203f0, | ||
394 | 0x04bd0003, | 384 | 0x04bd0003, |
395 | 0x29f024bd, | 385 | 0xb60825b6, |
396 | 0x0007f11f, | 386 | 0x20b60635, |
397 | 0x0203f008, | 387 | 0x0130b601, |
398 | 0xbd0002d0, | 388 | 0xb60824b6, |
399 | /* 0x0505: main */ | 389 | 0x2fb90834, |
400 | 0x0031f404, | 390 | 0xd321f502, |
401 | 0xf00028f4, | 391 | 0x002fbb02, |
402 | 0x21f424d7, | 392 | 0xf1003fbb, |
403 | 0xf401f439, | 393 | 0xf0010007, |
404 | 0xf404e4b0, | 394 | 0x03d00203, |
405 | 0x81fe1e18, | 395 | 0xbd04bd00, |
406 | 0x0627f001, | 396 | 0x1f29f024, |
407 | 0x12fd20bd, | 397 | 0x080007f1, |
408 | 0x01e4b604, | 398 | 0xd00203f0, |
409 | 0xfe051efd, | 399 | 0x04bd0002, |
410 | 0x21f50018, | 400 | /* 0x0508: main */ |
411 | 0x0ef405fa, | 401 | 0xf40031f4, |
412 | /* 0x0535: main_not_ctx_xfer */ | 402 | 0xd7f00028, |
413 | 0x10ef94d3, | 403 | 0x3921f424, |
414 | 0xf501f5f0, | 404 | 0xb0f401f4, |
415 | 0xf4037e21, | 405 | 0x18f404e4, |
416 | /* 0x0542: ih */ | 406 | 0x0181fe1e, |
417 | 0x80f9c60e, | 407 | 0xbd0627f0, |
418 | 0xf90188fe, | 408 | 0x0412fd20, |
419 | 0xf990f980, | 409 | 0xfd01e4b6, |
420 | 0xf9b0f9a0, | 410 | 0x18fe051e, |
421 | 0xf9e0f9d0, | 411 | 0xfd21f500, |
422 | 0xf104bdf0, | 412 | 0xd30ef405, |
423 | 0xf00200a7, | 413 | /* 0x0538: main_not_ctx_xfer */ |
424 | 0xaacf00a3, | 414 | 0xf010ef94, |
425 | 0x04abc400, | 415 | 0x21f501f5, |
426 | 0xf02c0bf4, | 416 | 0x0ef4037e, |
427 | 0xe7f124d7, | 417 | /* 0x0545: ih */ |
428 | 0xe3f01a00, | 418 | 0xfe80f9c6, |
429 | 0x00eecf00, | 419 | 0x80f90188, |
430 | 0x1900f7f1, | 420 | 0xa0f990f9, |
431 | 0xcf00f3f0, | 421 | 0xd0f9b0f9, |
432 | 0x21f400ff, | 422 | 0xf0f9e0f9, |
433 | 0x01e7f004, | 423 | 0xa7f104bd, |
434 | 0x1d0007f1, | 424 | 0xa3f00200, |
435 | 0xd00003f0, | 425 | 0x00aacf00, |
436 | 0x04bd000e, | 426 | 0xf404abc4, |
437 | /* 0x0590: ih_no_fifo */ | 427 | 0xd7f02c0b, |
438 | 0x010007f1, | 428 | 0x00e7f124, |
439 | 0xd00003f0, | 429 | 0x00e3f01a, |
440 | 0x04bd000a, | 430 | 0xf100eecf, |
441 | 0xe0fcf0fc, | 431 | 0xf01900f7, |
442 | 0xb0fcd0fc, | 432 | 0xffcf00f3, |
443 | 0x90fca0fc, | 433 | 0x0421f400, |
444 | 0x88fe80fc, | 434 | 0xf101e7f0, |
445 | 0xf480fc00, | 435 | 0xf01d0007, |
446 | 0x01f80032, | 436 | 0x0ed00003, |
447 | /* 0x05b4: hub_barrier_done */ | 437 | /* 0x0593: ih_no_fifo */ |
448 | 0x9801f7f0, | 438 | 0xf104bd00, |
449 | 0xfebb040e, | 439 | 0xf0010007, |
450 | 0x02ffb904, | 440 | 0x0ad00003, |
451 | 0x9418e7f1, | 441 | 0xfc04bd00, |
452 | 0xf440e3f0, | 442 | 0xfce0fcf0, |
453 | 0x00f89d21, | 443 | 0xfcb0fcd0, |
454 | /* 0x05cc: ctx_redswitch */ | 444 | 0xfc90fca0, |
455 | 0xf120f7f0, | 445 | 0x0088fe80, |
446 | 0x32f480fc, | ||
447 | /* 0x05b7: hub_barrier_done */ | ||
448 | 0xf001f800, | ||
449 | 0x0e9801f7, | ||
450 | 0x04febb04, | ||
451 | 0xf102ffb9, | ||
452 | 0xf09418e7, | ||
453 | 0x21f440e3, | ||
454 | /* 0x05cf: ctx_redswitch */ | ||
455 | 0xf000f89d, | ||
456 | 0x07f120f7, | ||
457 | 0x03f08500, | ||
458 | 0x000fd001, | ||
459 | 0xe7f004bd, | ||
460 | /* 0x05e1: ctx_redswitch_delay */ | ||
461 | 0x01e2b608, | ||
462 | 0xf1fd1bf4, | ||
463 | 0xf10800f5, | ||
464 | 0xf10200f5, | ||
456 | 0xf0850007, | 465 | 0xf0850007, |
457 | 0x0fd00103, | 466 | 0x0fd00103, |
458 | 0xf004bd00, | 467 | 0xf804bd00, |
459 | /* 0x05de: ctx_redswitch_delay */ | 468 | /* 0x05fd: ctx_xfer */ |
460 | 0xe2b608e7, | 469 | 0x0007f100, |
461 | 0xfd1bf401, | 470 | 0x0203f081, |
462 | 0x0800f5f1, | 471 | 0xbd000fd0, |
463 | 0x0200f5f1, | 472 | 0x0711f404, |
464 | 0x850007f1, | 473 | 0x05cf21f5, |
465 | 0xd00103f0, | 474 | /* 0x0610: ctx_xfer_not_load */ |
466 | 0x04bd000f, | 475 | 0x026a21f5, |
467 | /* 0x05fa: ctx_xfer */ | 476 | 0x07f124bd, |
468 | 0x07f100f8, | 477 | 0x03f047fc, |
469 | 0x03f08100, | 478 | 0x0002d002, |
470 | 0x000fd002, | 479 | 0x2cf004bd, |
471 | 0x11f404bd, | 480 | 0x0320b601, |
472 | 0xcc21f507, | 481 | 0x4afc07f1, |
473 | /* 0x060d: ctx_xfer_not_load */ | 482 | 0xd00203f0, |
474 | 0x6a21f505, | 483 | 0x04bd0002, |
475 | 0xf124bd02, | ||
476 | 0xf047fc07, | ||
477 | 0x02d00203, | ||
478 | 0xf004bd00, | ||
479 | 0x20b6012c, | ||
480 | 0xfc07f103, | ||
481 | 0x0203f04a, | ||
482 | 0xbd0002d0, | ||
483 | 0x01acf004, | ||
484 | 0xf102a5f0, | ||
485 | 0xf00000b7, | ||
486 | 0x0c9850b3, | ||
487 | 0x0fc4b604, | ||
488 | 0x9800bcbb, | ||
489 | 0x0d98000c, | ||
490 | 0x00e7f001, | ||
491 | 0x016f21f5, | ||
492 | 0xf101acf0, | ||
493 | 0xf04000b7, | ||
494 | 0x0c9850b3, | ||
495 | 0x0fc4b604, | ||
496 | 0x9800bcbb, | ||
497 | 0x0d98010c, | ||
498 | 0x060f9802, | ||
499 | 0x0800e7f1, | ||
500 | 0x016f21f5, | ||
501 | 0xf001acf0, | 484 | 0xf001acf0, |
502 | 0xb7f104a5, | 485 | 0xb7f102a5, |
503 | 0xb3f03000, | 486 | 0xb3f00000, |
504 | 0x040c9850, | 487 | 0x040c9850, |
505 | 0xbb0fc4b6, | 488 | 0xbb0fc4b6, |
506 | 0x0c9800bc, | 489 | 0x0c9800bc, |
507 | 0x030d9802, | 490 | 0x010d9800, |
508 | 0xf1080f98, | 491 | 0xf500e7f0, |
509 | 0xf50200e7, | 492 | 0xf0016f21, |
510 | 0xf5016f21, | 493 | 0xb7f101ac, |
511 | 0xf4025e21, | 494 | 0xb3f04000, |
512 | 0x12f40601, | 495 | 0x040c9850, |
513 | /* 0x06a9: ctx_xfer_post */ | 496 | 0xbb0fc4b6, |
514 | 0x7f21f507, | 497 | 0x0c9800bc, |
515 | /* 0x06ad: ctx_xfer_done */ | 498 | 0x020d9801, |
516 | 0xb421f502, | 499 | 0xf1060f98, |
517 | 0x0000f805, | 500 | 0xf50800e7, |
518 | 0x00000000, | 501 | 0xf0016f21, |
502 | 0xa5f001ac, | ||
503 | 0x00b7f104, | ||
504 | 0x50b3f030, | ||
505 | 0xb6040c98, | ||
506 | 0xbcbb0fc4, | ||
507 | 0x020c9800, | ||
508 | 0x98030d98, | ||
509 | 0xe7f1080f, | ||
510 | 0x21f50200, | ||
511 | 0x21f5016f, | ||
512 | 0x01f4025e, | ||
513 | 0x0712f406, | ||
514 | /* 0x06ac: ctx_xfer_post */ | ||
515 | 0x027f21f5, | ||
516 | /* 0x06b0: ctx_xfer_done */ | ||
517 | 0x05b721f5, | ||
518 | 0x000000f8, | ||
519 | 0x00000000, | 519 | 0x00000000, |
520 | 0x00000000, | 520 | 0x00000000, |
521 | 0x00000000, | 521 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h index ee8e54db8fc9..1c6e11b05df2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h | |||
@@ -314,7 +314,7 @@ uint32_t gk110_grgpc_code[] = { | |||
314 | 0x03f01200, | 314 | 0x03f01200, |
315 | 0x0002d000, | 315 | 0x0002d000, |
316 | 0x17f104bd, | 316 | 0x17f104bd, |
317 | 0x10fe0542, | 317 | 0x10fe0545, |
318 | 0x0007f100, | 318 | 0x0007f100, |
319 | 0x0003f007, | 319 | 0x0003f007, |
320 | 0xbd0000d0, | 320 | 0xbd0000d0, |
@@ -338,184 +338,184 @@ uint32_t gk110_grgpc_code[] = { | |||
338 | 0x02d00103, | 338 | 0x02d00103, |
339 | 0xf104bd00, | 339 | 0xf104bd00, |
340 | 0xf00c30e7, | 340 | 0xf00c30e7, |
341 | 0x24bd50e3, | 341 | 0xe5f050e3, |
342 | 0x44bd34bd, | 342 | 0xbd24bd01, |
343 | /* 0x0430: init_unk_loop */ | 343 | /* 0x0433: init_unk_loop */ |
344 | 0xb06821f4, | 344 | 0xf444bd34, |
345 | 0x0bf400f6, | 345 | 0xf6b06821, |
346 | 0x01f7f00f, | 346 | 0x0f0bf400, |
347 | 0xfd04f2bb, | 347 | 0xbb01f7f0, |
348 | 0x30b6054f, | 348 | 0x4ffd04f2, |
349 | /* 0x0445: init_unk_next */ | 349 | 0x0130b605, |
350 | 0x0120b601, | 350 | /* 0x0448: init_unk_next */ |
351 | 0xb004e0b6, | 351 | 0xb60120b6, |
352 | 0x1bf40226, | 352 | 0x26b004e0, |
353 | /* 0x0451: init_unk_done */ | 353 | 0xe21bf402, |
354 | 0x070380e2, | 354 | /* 0x0454: init_unk_done */ |
355 | 0xf1080480, | 355 | 0x80070380, |
356 | 0xf0010027, | 356 | 0x27f10804, |
357 | 0x22cf0223, | 357 | 0x23f00100, |
358 | 0x9534bd00, | 358 | 0x0022cf02, |
359 | 0x07f10825, | 359 | 0x259534bd, |
360 | 0x03f0c000, | 360 | 0x0007f108, |
361 | 0x0005d001, | 361 | 0x0103f0c0, |
362 | 0x07f104bd, | 362 | 0xbd0005d0, |
363 | 0x03f0c100, | 363 | 0x0007f104, |
364 | 0x0005d001, | 364 | 0x0103f0c1, |
365 | 0x0e9804bd, | 365 | 0xbd0005d0, |
366 | 0x010f9800, | 366 | 0x000e9804, |
367 | 0x015021f5, | 367 | 0xf5010f98, |
368 | 0xbb002fbb, | 368 | 0xbb015021, |
369 | 0x0e98003f, | 369 | 0x3fbb002f, |
370 | 0x020f9801, | 370 | 0x010e9800, |
371 | 0x015021f5, | 371 | 0xf5020f98, |
372 | 0xfd050e98, | 372 | 0x98015021, |
373 | 0x2ebb00ef, | 373 | 0xeffd050e, |
374 | 0x003ebb00, | 374 | 0x002ebb00, |
375 | 0x98020e98, | 375 | 0x98003ebb, |
376 | 0x21f5030f, | 376 | 0x0f98020e, |
377 | 0x0e980150, | 377 | 0x5021f503, |
378 | 0x00effd07, | 378 | 0x070e9801, |
379 | 0xbb002ebb, | 379 | 0xbb00effd, |
380 | 0x35b6003e, | 380 | 0x3ebb002e, |
381 | 0x0007f102, | 381 | 0x0235b600, |
382 | 0x0103f0d3, | 382 | 0xd30007f1, |
383 | 0xbd0003d0, | 383 | 0xd00103f0, |
384 | 0x0825b604, | ||
385 | 0xb60635b6, | ||
386 | 0x30b60120, | ||
387 | 0x0824b601, | ||
388 | 0xb90834b6, | ||
389 | 0x21f5022f, | ||
390 | 0x2fbb02d3, | ||
391 | 0x003fbb00, | ||
392 | 0x010007f1, | ||
393 | 0xd00203f0, | ||
394 | 0x04bd0003, | 384 | 0x04bd0003, |
395 | 0x29f024bd, | 385 | 0xb60825b6, |
396 | 0x0007f11f, | 386 | 0x20b60635, |
397 | 0x0203f030, | 387 | 0x0130b601, |
398 | 0xbd0002d0, | 388 | 0xb60824b6, |
399 | /* 0x0505: main */ | 389 | 0x2fb90834, |
400 | 0x0031f404, | 390 | 0xd321f502, |
401 | 0xf00028f4, | 391 | 0x002fbb02, |
402 | 0x21f424d7, | 392 | 0xf1003fbb, |
403 | 0xf401f439, | 393 | 0xf0010007, |
404 | 0xf404e4b0, | 394 | 0x03d00203, |
405 | 0x81fe1e18, | 395 | 0xbd04bd00, |
406 | 0x0627f001, | 396 | 0x1f29f024, |
407 | 0x12fd20bd, | 397 | 0x300007f1, |
408 | 0x01e4b604, | 398 | 0xd00203f0, |
409 | 0xfe051efd, | 399 | 0x04bd0002, |
410 | 0x21f50018, | 400 | /* 0x0508: main */ |
411 | 0x0ef405fa, | 401 | 0xf40031f4, |
412 | /* 0x0535: main_not_ctx_xfer */ | 402 | 0xd7f00028, |
413 | 0x10ef94d3, | 403 | 0x3921f424, |
414 | 0xf501f5f0, | 404 | 0xb0f401f4, |
415 | 0xf4037e21, | 405 | 0x18f404e4, |
416 | /* 0x0542: ih */ | 406 | 0x0181fe1e, |
417 | 0x80f9c60e, | 407 | 0xbd0627f0, |
418 | 0xf90188fe, | 408 | 0x0412fd20, |
419 | 0xf990f980, | 409 | 0xfd01e4b6, |
420 | 0xf9b0f9a0, | 410 | 0x18fe051e, |
421 | 0xf9e0f9d0, | 411 | 0xfd21f500, |
422 | 0xf104bdf0, | 412 | 0xd30ef405, |
423 | 0xf00200a7, | 413 | /* 0x0538: main_not_ctx_xfer */ |
424 | 0xaacf00a3, | 414 | 0xf010ef94, |
425 | 0x04abc400, | 415 | 0x21f501f5, |
426 | 0xf02c0bf4, | 416 | 0x0ef4037e, |
427 | 0xe7f124d7, | 417 | /* 0x0545: ih */ |
428 | 0xe3f01a00, | 418 | 0xfe80f9c6, |
429 | 0x00eecf00, | 419 | 0x80f90188, |
430 | 0x1900f7f1, | 420 | 0xa0f990f9, |
431 | 0xcf00f3f0, | 421 | 0xd0f9b0f9, |
432 | 0x21f400ff, | 422 | 0xf0f9e0f9, |
433 | 0x01e7f004, | 423 | 0xa7f104bd, |
434 | 0x1d0007f1, | 424 | 0xa3f00200, |
435 | 0xd00003f0, | 425 | 0x00aacf00, |
436 | 0x04bd000e, | 426 | 0xf404abc4, |
437 | /* 0x0590: ih_no_fifo */ | 427 | 0xd7f02c0b, |
438 | 0x010007f1, | 428 | 0x00e7f124, |
439 | 0xd00003f0, | 429 | 0x00e3f01a, |
440 | 0x04bd000a, | 430 | 0xf100eecf, |
441 | 0xe0fcf0fc, | 431 | 0xf01900f7, |
442 | 0xb0fcd0fc, | 432 | 0xffcf00f3, |
443 | 0x90fca0fc, | 433 | 0x0421f400, |
444 | 0x88fe80fc, | 434 | 0xf101e7f0, |
445 | 0xf480fc00, | 435 | 0xf01d0007, |
446 | 0x01f80032, | 436 | 0x0ed00003, |
447 | /* 0x05b4: hub_barrier_done */ | 437 | /* 0x0593: ih_no_fifo */ |
448 | 0x9801f7f0, | 438 | 0xf104bd00, |
449 | 0xfebb040e, | 439 | 0xf0010007, |
450 | 0x02ffb904, | 440 | 0x0ad00003, |
451 | 0x9418e7f1, | 441 | 0xfc04bd00, |
452 | 0xf440e3f0, | 442 | 0xfce0fcf0, |
453 | 0x00f89d21, | 443 | 0xfcb0fcd0, |
454 | /* 0x05cc: ctx_redswitch */ | 444 | 0xfc90fca0, |
455 | 0xf120f7f0, | 445 | 0x0088fe80, |
446 | 0x32f480fc, | ||
447 | /* 0x05b7: hub_barrier_done */ | ||
448 | 0xf001f800, | ||
449 | 0x0e9801f7, | ||
450 | 0x04febb04, | ||
451 | 0xf102ffb9, | ||
452 | 0xf09418e7, | ||
453 | 0x21f440e3, | ||
454 | /* 0x05cf: ctx_redswitch */ | ||
455 | 0xf000f89d, | ||
456 | 0x07f120f7, | ||
457 | 0x03f08500, | ||
458 | 0x000fd001, | ||
459 | 0xe7f004bd, | ||
460 | /* 0x05e1: ctx_redswitch_delay */ | ||
461 | 0x01e2b608, | ||
462 | 0xf1fd1bf4, | ||
463 | 0xf10800f5, | ||
464 | 0xf10200f5, | ||
456 | 0xf0850007, | 465 | 0xf0850007, |
457 | 0x0fd00103, | 466 | 0x0fd00103, |
458 | 0xf004bd00, | 467 | 0xf804bd00, |
459 | /* 0x05de: ctx_redswitch_delay */ | 468 | /* 0x05fd: ctx_xfer */ |
460 | 0xe2b608e7, | 469 | 0x0007f100, |
461 | 0xfd1bf401, | 470 | 0x0203f081, |
462 | 0x0800f5f1, | 471 | 0xbd000fd0, |
463 | 0x0200f5f1, | 472 | 0x0711f404, |
464 | 0x850007f1, | 473 | 0x05cf21f5, |
465 | 0xd00103f0, | 474 | /* 0x0610: ctx_xfer_not_load */ |
466 | 0x04bd000f, | 475 | 0x026a21f5, |
467 | /* 0x05fa: ctx_xfer */ | 476 | 0x07f124bd, |
468 | 0x07f100f8, | 477 | 0x03f047fc, |
469 | 0x03f08100, | 478 | 0x0002d002, |
470 | 0x000fd002, | 479 | 0x2cf004bd, |
471 | 0x11f404bd, | 480 | 0x0320b601, |
472 | 0xcc21f507, | 481 | 0x4afc07f1, |
473 | /* 0x060d: ctx_xfer_not_load */ | 482 | 0xd00203f0, |
474 | 0x6a21f505, | 483 | 0x04bd0002, |
475 | 0xf124bd02, | ||
476 | 0xf047fc07, | ||
477 | 0x02d00203, | ||
478 | 0xf004bd00, | ||
479 | 0x20b6012c, | ||
480 | 0xfc07f103, | ||
481 | 0x0203f04a, | ||
482 | 0xbd0002d0, | ||
483 | 0x01acf004, | ||
484 | 0xf102a5f0, | ||
485 | 0xf00000b7, | ||
486 | 0x0c9850b3, | ||
487 | 0x0fc4b604, | ||
488 | 0x9800bcbb, | ||
489 | 0x0d98000c, | ||
490 | 0x00e7f001, | ||
491 | 0x016f21f5, | ||
492 | 0xf101acf0, | ||
493 | 0xf04000b7, | ||
494 | 0x0c9850b3, | ||
495 | 0x0fc4b604, | ||
496 | 0x9800bcbb, | ||
497 | 0x0d98010c, | ||
498 | 0x060f9802, | ||
499 | 0x0800e7f1, | ||
500 | 0x016f21f5, | ||
501 | 0xf001acf0, | 484 | 0xf001acf0, |
502 | 0xb7f104a5, | 485 | 0xb7f102a5, |
503 | 0xb3f03000, | 486 | 0xb3f00000, |
504 | 0x040c9850, | 487 | 0x040c9850, |
505 | 0xbb0fc4b6, | 488 | 0xbb0fc4b6, |
506 | 0x0c9800bc, | 489 | 0x0c9800bc, |
507 | 0x030d9802, | 490 | 0x010d9800, |
508 | 0xf1080f98, | 491 | 0xf500e7f0, |
509 | 0xf50200e7, | 492 | 0xf0016f21, |
510 | 0xf5016f21, | 493 | 0xb7f101ac, |
511 | 0xf4025e21, | 494 | 0xb3f04000, |
512 | 0x12f40601, | 495 | 0x040c9850, |
513 | /* 0x06a9: ctx_xfer_post */ | 496 | 0xbb0fc4b6, |
514 | 0x7f21f507, | 497 | 0x0c9800bc, |
515 | /* 0x06ad: ctx_xfer_done */ | 498 | 0x020d9801, |
516 | 0xb421f502, | 499 | 0xf1060f98, |
517 | 0x0000f805, | 500 | 0xf50800e7, |
518 | 0x00000000, | 501 | 0xf0016f21, |
502 | 0xa5f001ac, | ||
503 | 0x00b7f104, | ||
504 | 0x50b3f030, | ||
505 | 0xb6040c98, | ||
506 | 0xbcbb0fc4, | ||
507 | 0x020c9800, | ||
508 | 0x98030d98, | ||
509 | 0xe7f1080f, | ||
510 | 0x21f50200, | ||
511 | 0x21f5016f, | ||
512 | 0x01f4025e, | ||
513 | 0x0712f406, | ||
514 | /* 0x06ac: ctx_xfer_post */ | ||
515 | 0x027f21f5, | ||
516 | /* 0x06b0: ctx_xfer_done */ | ||
517 | 0x05b721f5, | ||
518 | 0x000000f8, | ||
519 | 0x00000000, | 519 | 0x00000000, |
520 | 0x00000000, | 520 | 0x00000000, |
521 | 0x00000000, | 521 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h index fbcc342f896f..84af7ec6a78e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h | |||
@@ -276,7 +276,7 @@ uint32_t gk208_grgpc_code[] = { | |||
276 | 0x02020014, | 276 | 0x02020014, |
277 | 0xf6120040, | 277 | 0xf6120040, |
278 | 0x04bd0002, | 278 | 0x04bd0002, |
279 | 0xfe048141, | 279 | 0xfe048441, |
280 | 0x00400010, | 280 | 0x00400010, |
281 | 0x0000f607, | 281 | 0x0000f607, |
282 | 0x040204bd, | 282 | 0x040204bd, |
@@ -295,165 +295,165 @@ uint32_t gk208_grgpc_code[] = { | |||
295 | 0x01c90080, | 295 | 0x01c90080, |
296 | 0xbd0002f6, | 296 | 0xbd0002f6, |
297 | 0x0c308e04, | 297 | 0x0c308e04, |
298 | 0xbd24bd50, | 298 | 0x01e5f050, |
299 | /* 0x0383: init_unk_loop */ | 299 | 0x34bd24bd, |
300 | 0x7e44bd34, | 300 | /* 0x0386: init_unk_loop */ |
301 | 0xb0000065, | 301 | 0x657e44bd, |
302 | 0x0bf400f6, | 302 | 0xf6b00000, |
303 | 0xbb010f0e, | 303 | 0x0e0bf400, |
304 | 0x4ffd04f2, | 304 | 0xf2bb010f, |
305 | 0x0130b605, | 305 | 0x054ffd04, |
306 | /* 0x0398: init_unk_next */ | 306 | /* 0x039b: init_unk_next */ |
307 | 0xb60120b6, | 307 | 0xb60130b6, |
308 | 0x26b004e0, | 308 | 0xe0b60120, |
309 | 0xe21bf401, | 309 | 0x0126b004, |
310 | /* 0x03a4: init_unk_done */ | 310 | /* 0x03a7: init_unk_done */ |
311 | 0xb50703b5, | 311 | 0xb5e21bf4, |
312 | 0x00820804, | 312 | 0x04b50703, |
313 | 0x22cf0201, | 313 | 0x01008208, |
314 | 0x9534bd00, | 314 | 0x0022cf02, |
315 | 0x00800825, | 315 | 0x259534bd, |
316 | 0x05f601c0, | 316 | 0xc0008008, |
317 | 0x8004bd00, | 317 | 0x0005f601, |
318 | 0xf601c100, | 318 | 0x008004bd, |
319 | 0x04bd0005, | 319 | 0x05f601c1, |
320 | 0x98000e98, | 320 | 0x9804bd00, |
321 | 0x207e010f, | 321 | 0x0f98000e, |
322 | 0x2fbb0001, | 322 | 0x01207e01, |
323 | 0x003fbb00, | 323 | 0x002fbb00, |
324 | 0x98010e98, | 324 | 0x98003fbb, |
325 | 0x207e020f, | 325 | 0x0f98010e, |
326 | 0x0e980001, | 326 | 0x01207e02, |
327 | 0x00effd05, | 327 | 0x050e9800, |
328 | 0xbb002ebb, | 328 | 0xbb00effd, |
329 | 0x0e98003e, | 329 | 0x3ebb002e, |
330 | 0x030f9802, | 330 | 0x020e9800, |
331 | 0x0001207e, | 331 | 0x7e030f98, |
332 | 0xfd070e98, | 332 | 0x98000120, |
333 | 0x2ebb00ef, | 333 | 0xeffd070e, |
334 | 0x003ebb00, | 334 | 0x002ebb00, |
335 | 0x800235b6, | 335 | 0xb6003ebb, |
336 | 0xf601d300, | 336 | 0x00800235, |
337 | 0x04bd0003, | 337 | 0x03f601d3, |
338 | 0xb60825b6, | 338 | 0xb604bd00, |
339 | 0x20b60635, | 339 | 0x35b60825, |
340 | 0x0130b601, | 340 | 0x0120b606, |
341 | 0xb60824b6, | 341 | 0xb60130b6, |
342 | 0x2fb20834, | 342 | 0x34b60824, |
343 | 0x0002687e, | 343 | 0x7e2fb208, |
344 | 0xbb002fbb, | 344 | 0xbb000268, |
345 | 0x0080003f, | 345 | 0x3fbb002f, |
346 | 0x03f60201, | 346 | 0x01008000, |
347 | 0xbd04bd00, | 347 | 0x0003f602, |
348 | 0x1f29f024, | 348 | 0x24bd04bd, |
349 | 0x02300080, | 349 | 0x801f29f0, |
350 | 0xbd0002f6, | 350 | 0xf6023000, |
351 | /* 0x0445: main */ | 351 | 0x04bd0002, |
352 | 0x0031f404, | 352 | /* 0x0448: main */ |
353 | 0x0d0028f4, | 353 | 0xf40031f4, |
354 | 0x00377e24, | 354 | 0x240d0028, |
355 | 0xf401f400, | 355 | 0x0000377e, |
356 | 0xf404e4b0, | 356 | 0xb0f401f4, |
357 | 0x81fe1d18, | 357 | 0x18f404e4, |
358 | 0xbd060201, | 358 | 0x0181fe1d, |
359 | 0x0412fd20, | 359 | 0x20bd0602, |
360 | 0xfd01e4b6, | 360 | 0xb60412fd, |
361 | 0x18fe051e, | 361 | 0x1efd01e4, |
362 | 0x05187e00, | 362 | 0x0018fe05, |
363 | 0xd40ef400, | 363 | 0x00051b7e, |
364 | /* 0x0474: main_not_ctx_xfer */ | 364 | /* 0x0477: main_not_ctx_xfer */ |
365 | 0xf010ef94, | 365 | 0x94d40ef4, |
366 | 0xf87e01f5, | 366 | 0xf5f010ef, |
367 | 0x0ef40002, | 367 | 0x02f87e01, |
368 | /* 0x0481: ih */ | 368 | 0xc70ef400, |
369 | 0xfe80f9c7, | 369 | /* 0x0484: ih */ |
370 | 0x80f90188, | 370 | 0x88fe80f9, |
371 | 0xa0f990f9, | 371 | 0xf980f901, |
372 | 0xd0f9b0f9, | 372 | 0xf9a0f990, |
373 | 0xf0f9e0f9, | 373 | 0xf9d0f9b0, |
374 | 0x004a04bd, | 374 | 0xbdf0f9e0, |
375 | 0x00aacf02, | 375 | 0x02004a04, |
376 | 0xf404abc4, | 376 | 0xc400aacf, |
377 | 0x240d1f0b, | 377 | 0x0bf404ab, |
378 | 0xcf1a004e, | 378 | 0x4e240d1f, |
379 | 0x004f00ee, | 379 | 0xeecf1a00, |
380 | 0x00ffcf19, | 380 | 0x19004f00, |
381 | 0x0000047e, | 381 | 0x7e00ffcf, |
382 | 0x0040010e, | 382 | 0x0e000004, |
383 | 0x000ef61d, | 383 | 0x1d004001, |
384 | /* 0x04be: ih_no_fifo */ | 384 | 0xbd000ef6, |
385 | 0x004004bd, | 385 | /* 0x04c1: ih_no_fifo */ |
386 | 0x000af601, | 386 | 0x01004004, |
387 | 0xf0fc04bd, | 387 | 0xbd000af6, |
388 | 0xd0fce0fc, | 388 | 0xfcf0fc04, |
389 | 0xa0fcb0fc, | 389 | 0xfcd0fce0, |
390 | 0x80fc90fc, | 390 | 0xfca0fcb0, |
391 | 0xfc0088fe, | 391 | 0xfe80fc90, |
392 | 0x0032f480, | 392 | 0x80fc0088, |
393 | /* 0x04de: hub_barrier_done */ | 393 | 0xf80032f4, |
394 | 0x010f01f8, | 394 | /* 0x04e1: hub_barrier_done */ |
395 | 0xbb040e98, | 395 | 0x98010f01, |
396 | 0xffb204fe, | 396 | 0xfebb040e, |
397 | 0x4094188e, | 397 | 0x8effb204, |
398 | 0x00008f7e, | 398 | 0x7e409418, |
399 | /* 0x04f2: ctx_redswitch */ | 399 | 0xf800008f, |
400 | 0x200f00f8, | 400 | /* 0x04f5: ctx_redswitch */ |
401 | 0x80200f00, | ||
402 | 0xf6018500, | ||
403 | 0x04bd000f, | ||
404 | /* 0x0502: ctx_redswitch_delay */ | ||
405 | 0xe2b6080e, | ||
406 | 0xfd1bf401, | ||
407 | 0x0800f5f1, | ||
408 | 0x0200f5f1, | ||
401 | 0x01850080, | 409 | 0x01850080, |
402 | 0xbd000ff6, | 410 | 0xbd000ff6, |
403 | /* 0x04ff: ctx_redswitch_delay */ | 411 | /* 0x051b: ctx_xfer */ |
404 | 0xb6080e04, | 412 | 0x8000f804, |
405 | 0x1bf401e2, | 413 | 0xf6028100, |
406 | 0x00f5f1fd, | 414 | 0x04bd000f, |
407 | 0x00f5f108, | 415 | 0x7e0711f4, |
408 | 0x85008002, | 416 | /* 0x052b: ctx_xfer_not_load */ |
409 | 0x000ff601, | 417 | 0x7e0004f5, |
410 | 0x00f804bd, | 418 | 0xbd000216, |
411 | /* 0x0518: ctx_xfer */ | 419 | 0x47fc8024, |
412 | 0x02810080, | ||
413 | 0xbd000ff6, | ||
414 | 0x0711f404, | ||
415 | 0x0004f27e, | ||
416 | /* 0x0528: ctx_xfer_not_load */ | ||
417 | 0x0002167e, | ||
418 | 0xfc8024bd, | ||
419 | 0x02f60247, | ||
420 | 0xf004bd00, | ||
421 | 0x20b6012c, | ||
422 | 0x4afc8003, | ||
423 | 0x0002f602, | 420 | 0x0002f602, |
424 | 0xacf004bd, | 421 | 0x2cf004bd, |
425 | 0x02a5f001, | 422 | 0x0320b601, |
426 | 0x5000008b, | 423 | 0x024afc80, |
427 | 0xb6040c98, | 424 | 0xbd0002f6, |
428 | 0xbcbb0fc4, | 425 | 0x01acf004, |
429 | 0x000c9800, | 426 | 0x8b02a5f0, |
430 | 0x0e010d98, | 427 | 0x98500000, |
431 | 0x013d7e00, | ||
432 | 0x01acf000, | ||
433 | 0x5040008b, | ||
434 | 0xb6040c98, | ||
435 | 0xbcbb0fc4, | ||
436 | 0x010c9800, | ||
437 | 0x98020d98, | ||
438 | 0x004e060f, | ||
439 | 0x013d7e08, | ||
440 | 0x01acf000, | ||
441 | 0x8b04a5f0, | ||
442 | 0x98503000, | ||
443 | 0xc4b6040c, | 428 | 0xc4b6040c, |
444 | 0x00bcbb0f, | 429 | 0x00bcbb0f, |
445 | 0x98020c98, | 430 | 0x98000c98, |
446 | 0x0f98030d, | 431 | 0x000e010d, |
447 | 0x02004e08, | ||
448 | 0x00013d7e, | 432 | 0x00013d7e, |
449 | 0x00020a7e, | 433 | 0x8b01acf0, |
450 | 0xf40601f4, | 434 | 0x98504000, |
451 | /* 0x05b2: ctx_xfer_post */ | 435 | 0xc4b6040c, |
452 | 0x277e0712, | 436 | 0x00bcbb0f, |
453 | /* 0x05b6: ctx_xfer_done */ | 437 | 0x98010c98, |
454 | 0xde7e0002, | 438 | 0x0f98020d, |
455 | 0x00f80004, | 439 | 0x08004e06, |
456 | 0x00000000, | 440 | 0x00013d7e, |
441 | 0xf001acf0, | ||
442 | 0x008b04a5, | ||
443 | 0x0c985030, | ||
444 | 0x0fc4b604, | ||
445 | 0x9800bcbb, | ||
446 | 0x0d98020c, | ||
447 | 0x080f9803, | ||
448 | 0x7e02004e, | ||
449 | 0x7e00013d, | ||
450 | 0xf400020a, | ||
451 | 0x12f40601, | ||
452 | /* 0x05b5: ctx_xfer_post */ | ||
453 | 0x02277e07, | ||
454 | /* 0x05b9: ctx_xfer_done */ | ||
455 | 0x04e17e00, | ||
456 | 0x0000f800, | ||
457 | 0x00000000, | 457 | 0x00000000, |
458 | 0x00000000, | 458 | 0x00000000, |
459 | 0x00000000, | 459 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h index 51f5c3c6e966..11bf363a6ae9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h | |||
@@ -289,7 +289,7 @@ uint32_t gm107_grgpc_code[] = { | |||
289 | 0x020014fe, | 289 | 0x020014fe, |
290 | 0x12004002, | 290 | 0x12004002, |
291 | 0xbd0002f6, | 291 | 0xbd0002f6, |
292 | 0x05b04104, | 292 | 0x05b34104, |
293 | 0x400010fe, | 293 | 0x400010fe, |
294 | 0x00f60700, | 294 | 0x00f60700, |
295 | 0x0204bd00, | 295 | 0x0204bd00, |
@@ -308,259 +308,259 @@ uint32_t gm107_grgpc_code[] = { | |||
308 | 0xc900800f, | 308 | 0xc900800f, |
309 | 0x0002f601, | 309 | 0x0002f601, |
310 | 0x308e04bd, | 310 | 0x308e04bd, |
311 | 0x24bd500c, | 311 | 0xe5f0500c, |
312 | 0x44bd34bd, | 312 | 0xbd24bd01, |
313 | /* 0x03b0: init_unk_loop */ | 313 | /* 0x03b3: init_unk_loop */ |
314 | 0x0000657e, | 314 | 0x7e44bd34, |
315 | 0xf400f6b0, | 315 | 0xb0000065, |
316 | 0x010f0e0b, | 316 | 0x0bf400f6, |
317 | 0xfd04f2bb, | 317 | 0xbb010f0e, |
318 | 0x30b6054f, | 318 | 0x4ffd04f2, |
319 | /* 0x03c5: init_unk_next */ | 319 | 0x0130b605, |
320 | 0x0120b601, | 320 | /* 0x03c8: init_unk_next */ |
321 | 0xb004e0b6, | 321 | 0xb60120b6, |
322 | 0x1bf40226, | 322 | 0x26b004e0, |
323 | /* 0x03d1: init_unk_done */ | 323 | 0xe21bf402, |
324 | 0x0703b5e2, | 324 | /* 0x03d4: init_unk_done */ |
325 | 0x820804b5, | 325 | 0xb50703b5, |
326 | 0xcf020100, | 326 | 0x00820804, |
327 | 0x34bd0022, | 327 | 0x22cf0201, |
328 | 0x80082595, | 328 | 0x9534bd00, |
329 | 0xf601c000, | 329 | 0x00800825, |
330 | 0x05f601c0, | ||
331 | 0x8004bd00, | ||
332 | 0xf601c100, | ||
330 | 0x04bd0005, | 333 | 0x04bd0005, |
331 | 0x01c10080, | 334 | 0x98000e98, |
332 | 0xbd0005f6, | 335 | 0x207e010f, |
333 | 0x000e9804, | 336 | 0x2fbb0001, |
334 | 0x7e010f98, | 337 | 0x003fbb00, |
335 | 0xbb000120, | 338 | 0x98010e98, |
336 | 0x3fbb002f, | 339 | 0x207e020f, |
337 | 0x010e9800, | 340 | 0x0e980001, |
338 | 0x7e020f98, | 341 | 0x00effd05, |
339 | 0x98000120, | 342 | 0xbb002ebb, |
340 | 0xeffd050e, | 343 | 0x0e98003e, |
341 | 0x002ebb00, | 344 | 0x030f9802, |
342 | 0x98003ebb, | 345 | 0x0001207e, |
343 | 0x0f98020e, | 346 | 0xfd070e98, |
344 | 0x01207e03, | 347 | 0x2ebb00ef, |
345 | 0x070e9800, | 348 | 0x003ebb00, |
346 | 0xbb00effd, | 349 | 0x800235b6, |
347 | 0x3ebb002e, | 350 | 0xf601d300, |
348 | 0x0235b600, | 351 | 0x04bd0003, |
349 | 0x01d30080, | 352 | 0xb60825b6, |
350 | 0xbd0003f6, | 353 | 0x20b60635, |
351 | 0x0825b604, | 354 | 0x0130b601, |
352 | 0xb60635b6, | 355 | 0xb60824b6, |
353 | 0x30b60120, | 356 | 0x2fb20834, |
354 | 0x0824b601, | 357 | 0x0002687e, |
355 | 0xb20834b6, | 358 | 0xbb002fbb, |
356 | 0x02687e2f, | 359 | 0x3f0f003f, |
357 | 0x002fbb00, | 360 | 0x501d608e, |
358 | 0x0f003fbb, | 361 | 0xb201e5f0, |
359 | 0x8effb23f, | 362 | 0x008f7eff, |
360 | 0xf0501d60, | 363 | 0x8e0c0f00, |
361 | 0x8f7e01e5, | ||
362 | 0x0c0f0000, | ||
363 | 0xa88effb2, | ||
364 | 0xe5f0501d, | ||
365 | 0x008f7e01, | ||
366 | 0x03147e00, | ||
367 | 0xb23f0f00, | ||
368 | 0x1d608eff, | ||
369 | 0x01e5f050, | ||
370 | 0x00008f7e, | ||
371 | 0xffb2000f, | ||
372 | 0x501d9c8e, | ||
373 | 0x7e01e5f0, | ||
374 | 0x0f00008f, | ||
375 | 0x03147e01, | ||
376 | 0x8effb200, | ||
377 | 0xf0501da8, | 364 | 0xf0501da8, |
378 | 0x8f7e01e5, | 365 | 0xffb201e5, |
379 | 0xff0f0000, | 366 | 0x00008f7e, |
380 | 0x988effb2, | 367 | 0x0003147e, |
368 | 0x608e3f0f, | ||
381 | 0xe5f0501d, | 369 | 0xe5f0501d, |
382 | 0x008f7e01, | 370 | 0x7effb201, |
383 | 0xb2020f00, | 371 | 0x0f00008f, |
384 | 0x1da88eff, | 372 | 0x1d9c8e00, |
385 | 0x01e5f050, | 373 | 0x01e5f050, |
386 | 0x00008f7e, | 374 | 0x8f7effb2, |
375 | 0x010f0000, | ||
387 | 0x0003147e, | 376 | 0x0003147e, |
388 | 0x85050498, | 377 | 0x501da88e, |
389 | 0x98504000, | 378 | 0xb201e5f0, |
390 | 0x64b60406, | 379 | 0x008f7eff, |
391 | 0x0056bb0f, | 380 | 0x8eff0f00, |
392 | /* 0x04e0: tpc_strand_init_tpc_loop */ | 381 | 0xf0501d98, |
393 | 0x05705eb8, | 382 | 0xffb201e5, |
394 | 0x00657e00, | ||
395 | 0xbdf6b200, | ||
396 | /* 0x04ed: tpc_strand_init_idx_loop */ | ||
397 | 0x605eb874, | ||
398 | 0x7fb20005, | ||
399 | 0x00008f7e, | ||
400 | 0x05885eb8, | ||
401 | 0x082f9500, | ||
402 | 0x00008f7e, | ||
403 | 0x058c5eb8, | ||
404 | 0x082f9500, | ||
405 | 0x00008f7e, | 383 | 0x00008f7e, |
406 | 0x05905eb8, | 384 | 0xa88e020f, |
407 | 0x00657e00, | ||
408 | 0x06f5b600, | ||
409 | 0xb601f0b6, | ||
410 | 0x2fbb08f4, | ||
411 | 0x003fbb00, | ||
412 | 0xb60170b6, | ||
413 | 0x1bf40162, | ||
414 | 0x0050b7bf, | ||
415 | 0x0142b608, | ||
416 | 0x0fa81bf4, | ||
417 | 0x8effb23f, | ||
418 | 0xf0501d60, | ||
419 | 0x8f7e01e5, | ||
420 | 0x0d0f0000, | ||
421 | 0xa88effb2, | ||
422 | 0xe5f0501d, | 385 | 0xe5f0501d, |
423 | 0x008f7e01, | 386 | 0x7effb201, |
424 | 0x03147e00, | 387 | 0x7e00008f, |
425 | 0x01008000, | 388 | 0x98000314, |
426 | 0x0003f602, | 389 | 0x00850504, |
427 | 0x24bd04bd, | 390 | 0x06985040, |
428 | 0x801f29f0, | 391 | 0x0f64b604, |
429 | 0xf6023000, | 392 | /* 0x04e3: tpc_strand_init_tpc_loop */ |
430 | 0x04bd0002, | 393 | 0xb80056bb, |
431 | /* 0x0574: main */ | 394 | 0x0005705e, |
432 | 0xf40031f4, | 395 | 0x0000657e, |
433 | 0x240d0028, | 396 | 0x74bdf6b2, |
434 | 0x0000377e, | 397 | /* 0x04f0: tpc_strand_init_idx_loop */ |
435 | 0xb0f401f4, | 398 | 0x05605eb8, |
436 | 0x18f404e4, | 399 | 0x7e7fb200, |
437 | 0x0181fe1d, | 400 | 0xb800008f, |
438 | 0x20bd0602, | 401 | 0x0005885e, |
439 | 0xb60412fd, | 402 | 0x7e082f95, |
440 | 0x1efd01e4, | 403 | 0xb800008f, |
441 | 0x0018fe05, | 404 | 0x00058c5e, |
442 | 0x0006477e, | 405 | 0x7e082f95, |
443 | /* 0x05a3: main_not_ctx_xfer */ | 406 | 0xb800008f, |
444 | 0x94d40ef4, | 407 | 0x0005905e, |
445 | 0xf5f010ef, | 408 | 0x0000657e, |
446 | 0x02f87e01, | 409 | 0xb606f5b6, |
447 | 0xc70ef400, | 410 | 0xf4b601f0, |
448 | /* 0x05b0: ih */ | 411 | 0x002fbb08, |
449 | 0x88fe80f9, | 412 | 0xb6003fbb, |
450 | 0xf980f901, | 413 | 0x62b60170, |
451 | 0xf9a0f990, | 414 | 0xbf1bf401, |
452 | 0xf9d0f9b0, | 415 | 0x080050b7, |
453 | 0xbdf0f9e0, | 416 | 0xf40142b6, |
454 | 0x02004a04, | 417 | 0x3f0fa81b, |
455 | 0xc400aacf, | 418 | 0x501d608e, |
456 | 0x0bf404ab, | 419 | 0xb201e5f0, |
457 | 0x4e240d1f, | 420 | 0x008f7eff, |
458 | 0xeecf1a00, | 421 | 0x8e0d0f00, |
459 | 0x19004f00, | 422 | 0xf0501da8, |
460 | 0x7e00ffcf, | 423 | 0xffb201e5, |
461 | 0x0e000004, | 424 | 0x00008f7e, |
462 | 0x1d004001, | 425 | 0x0003147e, |
463 | 0xbd000ef6, | 426 | 0x02010080, |
464 | /* 0x05ed: ih_no_fifo */ | 427 | 0xbd0003f6, |
465 | 0x01004004, | 428 | 0xf024bd04, |
466 | 0xbd000af6, | 429 | 0x00801f29, |
467 | 0xfcf0fc04, | 430 | 0x02f60230, |
468 | 0xfcd0fce0, | 431 | /* 0x0577: main */ |
469 | 0xfca0fcb0, | 432 | 0xf404bd00, |
470 | 0xfe80fc90, | 433 | 0x28f40031, |
471 | 0x80fc0088, | 434 | 0x7e240d00, |
472 | 0xf80032f4, | 435 | 0xf4000037, |
473 | /* 0x060d: hub_barrier_done */ | 436 | 0xe4b0f401, |
474 | 0x98010f01, | 437 | 0x1d18f404, |
475 | 0xfebb040e, | 438 | 0x020181fe, |
476 | 0x8effb204, | 439 | 0xfd20bd06, |
477 | 0x7e409418, | 440 | 0xe4b60412, |
478 | 0xf800008f, | 441 | 0x051efd01, |
479 | /* 0x0621: ctx_redswitch */ | 442 | 0x7e0018fe, |
480 | 0x80200f00, | 443 | 0xf400064a, |
444 | /* 0x05a6: main_not_ctx_xfer */ | ||
445 | 0xef94d40e, | ||
446 | 0x01f5f010, | ||
447 | 0x0002f87e, | ||
448 | /* 0x05b3: ih */ | ||
449 | 0xf9c70ef4, | ||
450 | 0x0188fe80, | ||
451 | 0x90f980f9, | ||
452 | 0xb0f9a0f9, | ||
453 | 0xe0f9d0f9, | ||
454 | 0x04bdf0f9, | ||
455 | 0xcf02004a, | ||
456 | 0xabc400aa, | ||
457 | 0x1f0bf404, | ||
458 | 0x004e240d, | ||
459 | 0x00eecf1a, | ||
460 | 0xcf19004f, | ||
461 | 0x047e00ff, | ||
462 | 0x010e0000, | ||
463 | 0xf61d0040, | ||
464 | 0x04bd000e, | ||
465 | /* 0x05f0: ih_no_fifo */ | ||
466 | 0xf6010040, | ||
467 | 0x04bd000a, | ||
468 | 0xe0fcf0fc, | ||
469 | 0xb0fcd0fc, | ||
470 | 0x90fca0fc, | ||
471 | 0x88fe80fc, | ||
472 | 0xf480fc00, | ||
473 | 0x01f80032, | ||
474 | /* 0x0610: hub_barrier_done */ | ||
475 | 0x0e98010f, | ||
476 | 0x04febb04, | ||
477 | 0x188effb2, | ||
478 | 0x8f7e4094, | ||
479 | 0x00f80000, | ||
480 | /* 0x0624: ctx_redswitch */ | ||
481 | 0x0080200f, | ||
482 | 0x0ff60185, | ||
483 | 0x0e04bd00, | ||
484 | /* 0x0631: ctx_redswitch_delay */ | ||
485 | 0x01e2b608, | ||
486 | 0xf1fd1bf4, | ||
487 | 0xf10800f5, | ||
488 | 0x800200f5, | ||
481 | 0xf6018500, | 489 | 0xf6018500, |
482 | 0x04bd000f, | 490 | 0x04bd000f, |
483 | /* 0x062e: ctx_redswitch_delay */ | 491 | /* 0x064a: ctx_xfer */ |
484 | 0xe2b6080e, | 492 | 0x008000f8, |
485 | 0xfd1bf401, | 493 | 0x0ff60281, |
486 | 0x0800f5f1, | 494 | 0x8e04bd00, |
487 | 0x0200f5f1, | 495 | 0xf0501dc4, |
488 | 0x01850080, | 496 | 0xffb201e5, |
489 | 0xbd000ff6, | 497 | 0x00008f7e, |
490 | /* 0x0647: ctx_xfer */ | 498 | 0x7e0711f4, |
491 | 0x8000f804, | 499 | /* 0x0667: ctx_xfer_not_load */ |
492 | 0xf6028100, | 500 | 0x7e000624, |
493 | 0x04bd000f, | 501 | 0xbd000216, |
494 | 0xc48effb2, | 502 | 0x47fc8024, |
495 | 0xe5f0501d, | ||
496 | 0x008f7e01, | ||
497 | 0x0711f400, | ||
498 | 0x0006217e, | ||
499 | /* 0x0664: ctx_xfer_not_load */ | ||
500 | 0x0002167e, | ||
501 | 0xfc8024bd, | ||
502 | 0x02f60247, | ||
503 | 0xf004bd00, | ||
504 | 0x20b6012c, | ||
505 | 0x4afc8003, | ||
506 | 0x0002f602, | 503 | 0x0002f602, |
507 | 0x0c0f04bd, | 504 | 0x2cf004bd, |
508 | 0xa88effb2, | 505 | 0x0320b601, |
509 | 0xe5f0501d, | 506 | 0x024afc80, |
510 | 0x008f7e01, | 507 | 0xbd0002f6, |
511 | 0x03147e00, | 508 | 0x8e0c0f04, |
512 | 0xb23f0f00, | 509 | 0xf0501da8, |
513 | 0x1d608eff, | 510 | 0xffb201e5, |
514 | 0x01e5f050, | ||
515 | 0x00008f7e, | 511 | 0x00008f7e, |
516 | 0xffb2000f, | 512 | 0x0003147e, |
517 | 0x501d9c8e, | 513 | 0x608e3f0f, |
518 | 0x7e01e5f0, | 514 | 0xe5f0501d, |
515 | 0x7effb201, | ||
519 | 0x0f00008f, | 516 | 0x0f00008f, |
520 | 0x03147e01, | 517 | 0x1d9c8e00, |
521 | 0x01fcf000, | ||
522 | 0xb203f0b6, | ||
523 | 0x1da88eff, | ||
524 | 0x01e5f050, | 518 | 0x01e5f050, |
525 | 0x00008f7e, | 519 | 0x8f7effb2, |
526 | 0xf001acf0, | 520 | 0x010f0000, |
527 | 0x008b02a5, | 521 | 0x0003147e, |
528 | 0x0c985000, | 522 | 0xb601fcf0, |
529 | 0x0fc4b604, | 523 | 0xa88e03f0, |
530 | 0x9800bcbb, | 524 | 0xe5f0501d, |
531 | 0x0d98000c, | 525 | 0x7effb201, |
532 | 0x7e000e01, | 526 | 0xf000008f, |
533 | 0xf000013d, | ||
534 | 0x008b01ac, | ||
535 | 0x0c985040, | ||
536 | 0x0fc4b604, | ||
537 | 0x9800bcbb, | ||
538 | 0x0d98010c, | ||
539 | 0x060f9802, | ||
540 | 0x7e08004e, | ||
541 | 0xf000013d, | ||
542 | 0xa5f001ac, | 527 | 0xa5f001ac, |
543 | 0x30008b04, | 528 | 0x00008b02, |
544 | 0x040c9850, | 529 | 0x040c9850, |
545 | 0xbb0fc4b6, | 530 | 0xbb0fc4b6, |
546 | 0x0c9800bc, | 531 | 0x0c9800bc, |
547 | 0x030d9802, | 532 | 0x010d9800, |
548 | 0x4e080f98, | 533 | 0x3d7e000e, |
549 | 0x3d7e0200, | 534 | 0xacf00001, |
550 | 0x0a7e0001, | 535 | 0x40008b01, |
551 | 0x147e0002, | 536 | 0x040c9850, |
552 | 0x01f40003, | 537 | 0xbb0fc4b6, |
553 | 0x1a12f406, | 538 | 0x0c9800bc, |
554 | /* 0x073c: ctx_xfer_post */ | 539 | 0x020d9801, |
555 | 0x0002277e, | 540 | 0x4e060f98, |
556 | 0xffb20d0f, | 541 | 0x3d7e0800, |
557 | 0x501da88e, | 542 | 0xacf00001, |
558 | 0x7e01e5f0, | 543 | 0x04a5f001, |
559 | 0x7e00008f, | 544 | 0x5030008b, |
560 | /* 0x0753: ctx_xfer_done */ | 545 | 0xb6040c98, |
561 | 0x7e000314, | 546 | 0xbcbb0fc4, |
562 | 0xf800060d, | 547 | 0x020c9800, |
563 | 0x00000000, | 548 | 0x98030d98, |
549 | 0x004e080f, | ||
550 | 0x013d7e02, | ||
551 | 0x020a7e00, | ||
552 | 0x03147e00, | ||
553 | 0x0601f400, | ||
554 | /* 0x073f: ctx_xfer_post */ | ||
555 | 0x7e1a12f4, | ||
556 | 0x0f000227, | ||
557 | 0x1da88e0d, | ||
558 | 0x01e5f050, | ||
559 | 0x8f7effb2, | ||
560 | 0x147e0000, | ||
561 | /* 0x0756: ctx_xfer_done */ | ||
562 | 0x107e0003, | ||
563 | 0x00f80006, | ||
564 | 0x00000000, | 564 | 0x00000000, |
565 | 0x00000000, | 565 | 0x00000000, |
566 | 0x00000000, | 566 | 0x00000000, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index dda7a7d224c9..9f5dfc85147a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | |||
@@ -143,7 +143,7 @@ gf100_gr_zbc_depth_get(struct gf100_gr *gr, int format, | |||
143 | static int | 143 | static int |
144 | gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) | 144 | gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) |
145 | { | 145 | { |
146 | struct gf100_gr *gr = (void *)object->engine; | 146 | struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine)); |
147 | union { | 147 | union { |
148 | struct fermi_a_zbc_color_v0 v0; | 148 | struct fermi_a_zbc_color_v0 v0; |
149 | } *args = data; | 149 | } *args = data; |
@@ -189,7 +189,7 @@ gf100_fermi_mthd_zbc_color(struct nvkm_object *object, void *data, u32 size) | |||
189 | static int | 189 | static int |
190 | gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size) | 190 | gf100_fermi_mthd_zbc_depth(struct nvkm_object *object, void *data, u32 size) |
191 | { | 191 | { |
192 | struct gf100_gr *gr = (void *)object->engine; | 192 | struct gf100_gr *gr = gf100_gr(nvkm_gr(object->engine)); |
193 | union { | 193 | union { |
194 | struct fermi_a_zbc_depth_v0 v0; | 194 | struct fermi_a_zbc_depth_v0 v0; |
195 | } *args = data; | 195 | } *args = data; |
@@ -1530,6 +1530,8 @@ gf100_gr_oneinit(struct nvkm_gr *base) | |||
1530 | gr->ppc_nr[i] = gr->func->ppc_nr; | 1530 | gr->ppc_nr[i] = gr->func->ppc_nr; |
1531 | for (j = 0; j < gr->ppc_nr[i]; j++) { | 1531 | for (j = 0; j < gr->ppc_nr[i]; j++) { |
1532 | u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4))); | 1532 | u8 mask = nvkm_rd32(device, GPC_UNIT(i, 0x0c30 + (j * 4))); |
1533 | if (mask) | ||
1534 | gr->ppc_mask[i] |= (1 << j); | ||
1533 | gr->ppc_tpc_nr[i][j] = hweight8(mask); | 1535 | gr->ppc_tpc_nr[i][j] = hweight8(mask); |
1534 | } | 1536 | } |
1535 | } | 1537 | } |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h index 4611961b1187..02e78b8d93f6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | |||
@@ -97,6 +97,7 @@ struct gf100_gr { | |||
97 | u8 tpc_nr[GPC_MAX]; | 97 | u8 tpc_nr[GPC_MAX]; |
98 | u8 tpc_total; | 98 | u8 tpc_total; |
99 | u8 ppc_nr[GPC_MAX]; | 99 | u8 ppc_nr[GPC_MAX]; |
100 | u8 ppc_mask[GPC_MAX]; | ||
100 | u8 ppc_tpc_nr[GPC_MAX][4]; | 101 | u8 ppc_tpc_nr[GPC_MAX][4]; |
101 | 102 | ||
102 | struct nvkm_memory *unk4188b4; | 103 | struct nvkm_memory *unk4188b4; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c index 43006db6fd58..80fed7e78dcb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c | |||
@@ -83,6 +83,7 @@ nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan) | |||
83 | fan->type = NVBIOS_THERM_FAN_UNK; | 83 | fan->type = NVBIOS_THERM_FAN_UNK; |
84 | } | 84 | } |
85 | 85 | ||
86 | fan->fan_mode = NVBIOS_THERM_FAN_LINEAR; | ||
86 | fan->min_duty = nvbios_rd08(bios, data + 0x02); | 87 | fan->min_duty = nvbios_rd08(bios, data + 0x02); |
87 | fan->max_duty = nvbios_rd08(bios, data + 0x03); | 88 | fan->max_duty = nvbios_rd08(bios, data + 0x03); |
88 | 89 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c index 895ba74057d4..1d7dd38292b3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c | |||
@@ -97,7 +97,9 @@ static void * | |||
97 | nvkm_instobj_dtor(struct nvkm_memory *memory) | 97 | nvkm_instobj_dtor(struct nvkm_memory *memory) |
98 | { | 98 | { |
99 | struct nvkm_instobj *iobj = nvkm_instobj(memory); | 99 | struct nvkm_instobj *iobj = nvkm_instobj(memory); |
100 | spin_lock(&iobj->imem->lock); | ||
100 | list_del(&iobj->head); | 101 | list_del(&iobj->head); |
102 | spin_unlock(&iobj->imem->lock); | ||
101 | nvkm_memory_del(&iobj->parent); | 103 | nvkm_memory_del(&iobj->parent); |
102 | return iobj; | 104 | return iobj; |
103 | } | 105 | } |
@@ -190,7 +192,9 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero, | |||
190 | nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory); | 192 | nvkm_memory_ctor(&nvkm_instobj_func_slow, &iobj->memory); |
191 | iobj->parent = memory; | 193 | iobj->parent = memory; |
192 | iobj->imem = imem; | 194 | iobj->imem = imem; |
195 | spin_lock(&iobj->imem->lock); | ||
193 | list_add_tail(&iobj->head, &imem->list); | 196 | list_add_tail(&iobj->head, &imem->list); |
197 | spin_unlock(&iobj->imem->lock); | ||
194 | memory = &iobj->memory; | 198 | memory = &iobj->memory; |
195 | } | 199 | } |
196 | 200 | ||
@@ -309,5 +313,6 @@ nvkm_instmem_ctor(const struct nvkm_instmem_func *func, | |||
309 | { | 313 | { |
310 | nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev); | 314 | nvkm_subdev_ctor(&nvkm_instmem, device, index, 0, &imem->subdev); |
311 | imem->func = func; | 315 | imem->func = func; |
316 | spin_lock_init(&imem->lock); | ||
312 | INIT_LIST_HEAD(&imem->list); | 317 | INIT_LIST_HEAD(&imem->list); |
313 | } | 318 | } |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c index d942fa7b9f18..86f9f3b13f71 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gk104.c | |||
@@ -81,9 +81,7 @@ gk104_pmu_pgob(struct nvkm_pmu *pmu, bool enable) | |||
81 | nvkm_mask(device, 0x000200, 0x00001000, 0x00001000); | 81 | nvkm_mask(device, 0x000200, 0x00001000, 0x00001000); |
82 | nvkm_rd32(device, 0x000200); | 82 | nvkm_rd32(device, 0x000200); |
83 | 83 | ||
84 | if ( nvkm_boolopt(device->cfgopt, "War00C800_0", | 84 | if (nvkm_boolopt(device->cfgopt, "War00C800_0", true)) { |
85 | device->quirk ? device->quirk->War00C800_0 : false)) { | ||
86 | nvkm_info(&pmu->subdev, "hw bug workaround enabled\n"); | ||
87 | switch (device->chipset) { | 85 | switch (device->chipset) { |
88 | case 0xe4: | 86 | case 0xe4: |
89 | magic(device, 0x04000000); | 87 | magic(device, 0x04000000); |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c index b61509e26ec9..b735173a18ff 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk104.c | |||
@@ -59,7 +59,7 @@ gk104_volt_set(struct nvkm_volt *base, u32 uv) | |||
59 | duty = (uv - bios->base) * div / bios->pwm_range; | 59 | duty = (uv - bios->base) * div / bios->pwm_range; |
60 | 60 | ||
61 | nvkm_wr32(device, 0x20340, div); | 61 | nvkm_wr32(device, 0x20340, div); |
62 | nvkm_wr32(device, 0x20344, 0x8000000 | duty); | 62 | nvkm_wr32(device, 0x20344, 0x80000000 | duty); |
63 | 63 | ||
64 | return 0; | 64 | return 0; |
65 | } | 65 | } |
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index b8e4cdec28c3..24f92bea39c7 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c | |||
@@ -112,11 +112,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper, | |||
112 | dma_addr_t paddr; | 112 | dma_addr_t paddr; |
113 | int ret; | 113 | int ret; |
114 | 114 | ||
115 | /* only doing ARGB32 since this is what is needed to alpha-blend | ||
116 | * with video overlays: | ||
117 | */ | ||
118 | sizes->surface_bpp = 32; | 115 | sizes->surface_bpp = 32; |
119 | sizes->surface_depth = 32; | 116 | sizes->surface_depth = 24; |
120 | 117 | ||
121 | DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, | 118 | DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, |
122 | sizes->surface_height, sizes->surface_bpp, | 119 | sizes->surface_height, sizes->surface_bpp, |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 248953d2fdb7..f81fb2641097 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -4173,11 +4173,7 @@ void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | |||
4173 | control |= ib->length_dw | (vm_id << 24); | 4173 | control |= ib->length_dw | (vm_id << 24); |
4174 | 4174 | ||
4175 | radeon_ring_write(ring, header); | 4175 | radeon_ring_write(ring, header); |
4176 | radeon_ring_write(ring, | 4176 | radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFFC)); |
4177 | #ifdef __BIG_ENDIAN | ||
4178 | (2 << 0) | | ||
4179 | #endif | ||
4180 | (ib->gpu_addr & 0xFFFFFFFC)); | ||
4181 | radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); | 4177 | radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); |
4182 | radeon_ring_write(ring, control); | 4178 | radeon_ring_write(ring, control); |
4183 | } | 4179 | } |
@@ -8472,7 +8468,7 @@ restart_ih: | |||
8472 | if (queue_dp) | 8468 | if (queue_dp) |
8473 | schedule_work(&rdev->dp_work); | 8469 | schedule_work(&rdev->dp_work); |
8474 | if (queue_hotplug) | 8470 | if (queue_hotplug) |
8475 | schedule_work(&rdev->hotplug_work); | 8471 | schedule_delayed_work(&rdev->hotplug_work, 0); |
8476 | if (queue_reset) { | 8472 | if (queue_reset) { |
8477 | rdev->needs_reset = true; | 8473 | rdev->needs_reset = true; |
8478 | wake_up_all(&rdev->fence_queue); | 8474 | wake_up_all(&rdev->fence_queue); |
@@ -9630,6 +9626,9 @@ static void dce8_program_watermarks(struct radeon_device *rdev, | |||
9630 | (rdev->disp_priority == 2)) { | 9626 | (rdev->disp_priority == 2)) { |
9631 | DRM_DEBUG_KMS("force priority to high\n"); | 9627 | DRM_DEBUG_KMS("force priority to high\n"); |
9632 | } | 9628 | } |
9629 | |||
9630 | /* Save number of lines the linebuffer leads before the scanout */ | ||
9631 | radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); | ||
9633 | } | 9632 | } |
9634 | 9633 | ||
9635 | /* select wm A */ | 9634 | /* select wm A */ |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 7f33767d7ed6..2ad462896896 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -2372,6 +2372,9 @@ static void evergreen_program_watermarks(struct radeon_device *rdev, | |||
2372 | c.full = dfixed_div(c, a); | 2372 | c.full = dfixed_div(c, a); |
2373 | priority_b_mark = dfixed_trunc(c); | 2373 | priority_b_mark = dfixed_trunc(c); |
2374 | priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; | 2374 | priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; |
2375 | |||
2376 | /* Save number of lines the linebuffer leads before the scanout */ | ||
2377 | radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); | ||
2375 | } | 2378 | } |
2376 | 2379 | ||
2377 | /* select wm A */ | 2380 | /* select wm A */ |
@@ -5344,7 +5347,7 @@ restart_ih: | |||
5344 | if (queue_dp) | 5347 | if (queue_dp) |
5345 | schedule_work(&rdev->dp_work); | 5348 | schedule_work(&rdev->dp_work); |
5346 | if (queue_hotplug) | 5349 | if (queue_hotplug) |
5347 | schedule_work(&rdev->hotplug_work); | 5350 | schedule_delayed_work(&rdev->hotplug_work, 0); |
5348 | if (queue_hdmi) | 5351 | if (queue_hdmi) |
5349 | schedule_work(&rdev->audio_work); | 5352 | schedule_work(&rdev->audio_work); |
5350 | if (queue_thermal && rdev->pm.dpm_enabled) | 5353 | if (queue_thermal && rdev->pm.dpm_enabled) |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 238b13f045c1..9e7e2bf03b81 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -806,7 +806,7 @@ int r100_irq_process(struct radeon_device *rdev) | |||
806 | status = r100_irq_ack(rdev); | 806 | status = r100_irq_ack(rdev); |
807 | } | 807 | } |
808 | if (queue_hotplug) | 808 | if (queue_hotplug) |
809 | schedule_work(&rdev->hotplug_work); | 809 | schedule_delayed_work(&rdev->hotplug_work, 0); |
810 | if (rdev->msi_enabled) { | 810 | if (rdev->msi_enabled) { |
811 | switch (rdev->family) { | 811 | switch (rdev->family) { |
812 | case CHIP_RS400: | 812 | case CHIP_RS400: |
@@ -3217,6 +3217,9 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
3217 | uint32_t pixel_bytes1 = 0; | 3217 | uint32_t pixel_bytes1 = 0; |
3218 | uint32_t pixel_bytes2 = 0; | 3218 | uint32_t pixel_bytes2 = 0; |
3219 | 3219 | ||
3220 | /* Guess line buffer size to be 8192 pixels */ | ||
3221 | u32 lb_size = 8192; | ||
3222 | |||
3220 | if (!rdev->mode_info.mode_config_initialized) | 3223 | if (!rdev->mode_info.mode_config_initialized) |
3221 | return; | 3224 | return; |
3222 | 3225 | ||
@@ -3631,6 +3634,13 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
3631 | DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n", | 3634 | DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n", |
3632 | (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); | 3635 | (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); |
3633 | } | 3636 | } |
3637 | |||
3638 | /* Save number of lines the linebuffer leads before the scanout */ | ||
3639 | if (mode1) | ||
3640 | rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay); | ||
3641 | |||
3642 | if (mode2) | ||
3643 | rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay); | ||
3634 | } | 3644 | } |
3635 | 3645 | ||
3636 | int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) | 3646 | int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 4ea5b10ff5f4..cc2fdf0be37a 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -4276,7 +4276,7 @@ restart_ih: | |||
4276 | WREG32(IH_RB_RPTR, rptr); | 4276 | WREG32(IH_RB_RPTR, rptr); |
4277 | } | 4277 | } |
4278 | if (queue_hotplug) | 4278 | if (queue_hotplug) |
4279 | schedule_work(&rdev->hotplug_work); | 4279 | schedule_delayed_work(&rdev->hotplug_work, 0); |
4280 | if (queue_hdmi) | 4280 | if (queue_hdmi) |
4281 | schedule_work(&rdev->audio_work); | 4281 | schedule_work(&rdev->audio_work); |
4282 | if (queue_thermal && rdev->pm.dpm_enabled) | 4282 | if (queue_thermal && rdev->pm.dpm_enabled) |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b6cbd816537e..87db64983ea8 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -2414,7 +2414,7 @@ struct radeon_device { | |||
2414 | struct r600_ih ih; /* r6/700 interrupt ring */ | 2414 | struct r600_ih ih; /* r6/700 interrupt ring */ |
2415 | struct radeon_rlc rlc; | 2415 | struct radeon_rlc rlc; |
2416 | struct radeon_mec mec; | 2416 | struct radeon_mec mec; |
2417 | struct work_struct hotplug_work; | 2417 | struct delayed_work hotplug_work; |
2418 | struct work_struct dp_work; | 2418 | struct work_struct dp_work; |
2419 | struct work_struct audio_work; | 2419 | struct work_struct audio_work; |
2420 | int num_crtc; /* number of crtcs */ | 2420 | int num_crtc; /* number of crtcs */ |
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index fe994aac3b04..c77d349c561c 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c | |||
@@ -54,6 +54,9 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = { | |||
54 | /* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */ | 54 | /* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */ |
55 | { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50, | 55 | { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50, |
56 | PCI_VENDOR_ID_IBM, 0x0550, 1}, | 56 | PCI_VENDOR_ID_IBM, 0x0550, 1}, |
57 | /* Intel 82855PM host bridge / RV250/M9 GL [Mobility FireGL 9000/Radeon 9000] needs AGPMode 1 (Thinkpad T40p) */ | ||
58 | { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66, | ||
59 | PCI_VENDOR_ID_IBM, 0x054d, 1}, | ||
57 | /* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */ | 60 | /* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */ |
58 | { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57, | 61 | { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57, |
59 | PCI_VENDOR_ID_IBM, 0x0530, 1}, | 62 | PCI_VENDOR_ID_IBM, 0x0530, 1}, |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 5a2cafb4f1bc..340f3f549f29 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -1234,13 +1234,32 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
1234 | if (r < 0) | 1234 | if (r < 0) |
1235 | return connector_status_disconnected; | 1235 | return connector_status_disconnected; |
1236 | 1236 | ||
1237 | if (radeon_connector->detected_hpd_without_ddc) { | ||
1238 | force = true; | ||
1239 | radeon_connector->detected_hpd_without_ddc = false; | ||
1240 | } | ||
1241 | |||
1237 | if (!force && radeon_check_hpd_status_unchanged(connector)) { | 1242 | if (!force && radeon_check_hpd_status_unchanged(connector)) { |
1238 | ret = connector->status; | 1243 | ret = connector->status; |
1239 | goto exit; | 1244 | goto exit; |
1240 | } | 1245 | } |
1241 | 1246 | ||
1242 | if (radeon_connector->ddc_bus) | 1247 | if (radeon_connector->ddc_bus) { |
1243 | dret = radeon_ddc_probe(radeon_connector, false); | 1248 | dret = radeon_ddc_probe(radeon_connector, false); |
1249 | |||
1250 | /* Sometimes the pins required for the DDC probe on DVI | ||
1251 | * connectors don't make contact at the same time that the ones | ||
1252 | * for HPD do. If the DDC probe fails even though we had an HPD | ||
1253 | * signal, try again later */ | ||
1254 | if (!dret && !force && | ||
1255 | connector->status != connector_status_connected) { | ||
1256 | DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n"); | ||
1257 | radeon_connector->detected_hpd_without_ddc = true; | ||
1258 | schedule_delayed_work(&rdev->hotplug_work, | ||
1259 | msecs_to_jiffies(1000)); | ||
1260 | goto exit; | ||
1261 | } | ||
1262 | } | ||
1244 | if (dret) { | 1263 | if (dret) { |
1245 | radeon_connector->detected_by_load = false; | 1264 | radeon_connector->detected_by_load = false; |
1246 | radeon_connector_free_edid(connector); | 1265 | radeon_connector_free_edid(connector); |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index a8d9927ed9eb..1eca0acac016 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -322,7 +322,9 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id) | |||
322 | * to complete in this vblank? | 322 | * to complete in this vblank? |
323 | */ | 323 | */ |
324 | if (update_pending && | 324 | if (update_pending && |
325 | (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0, | 325 | (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, |
326 | crtc_id, | ||
327 | USE_REAL_VBLANKSTART, | ||
326 | &vpos, &hpos, NULL, NULL, | 328 | &vpos, &hpos, NULL, NULL, |
327 | &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) && | 329 | &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) && |
328 | ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) || | 330 | ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) || |
@@ -401,6 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work) | |||
401 | struct drm_crtc *crtc = &radeon_crtc->base; | 403 | struct drm_crtc *crtc = &radeon_crtc->base; |
402 | unsigned long flags; | 404 | unsigned long flags; |
403 | int r; | 405 | int r; |
406 | int vpos, hpos, stat, min_udelay; | ||
407 | struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; | ||
404 | 408 | ||
405 | down_read(&rdev->exclusive_lock); | 409 | down_read(&rdev->exclusive_lock); |
406 | if (work->fence) { | 410 | if (work->fence) { |
@@ -437,6 +441,41 @@ static void radeon_flip_work_func(struct work_struct *__work) | |||
437 | /* set the proper interrupt */ | 441 | /* set the proper interrupt */ |
438 | radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); | 442 | radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); |
439 | 443 | ||
444 | /* If this happens to execute within the "virtually extended" vblank | ||
445 | * interval before the start of the real vblank interval then it needs | ||
446 | * to delay programming the mmio flip until the real vblank is entered. | ||
447 | * This prevents completing a flip too early due to the way we fudge | ||
448 | * our vblank counter and vblank timestamps in order to work around the | ||
449 | * problem that the hw fires vblank interrupts before actual start of | ||
450 | * vblank (when line buffer refilling is done for a frame). It | ||
451 | * complements the fudging logic in radeon_get_crtc_scanoutpos() for | ||
452 | * timestamping and radeon_get_vblank_counter_kms() for vblank counts. | ||
453 | * | ||
454 | * In practice this won't execute very often unless on very fast | ||
455 | * machines because the time window for this to happen is very small. | ||
456 | */ | ||
457 | for (;;) { | ||
458 | /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank | ||
459 | * start in hpos, and to the "fudged earlier" vblank start in | ||
460 | * vpos. | ||
461 | */ | ||
462 | stat = radeon_get_crtc_scanoutpos(rdev->ddev, work->crtc_id, | ||
463 | GET_DISTANCE_TO_VBLANKSTART, | ||
464 | &vpos, &hpos, NULL, NULL, | ||
465 | &crtc->hwmode); | ||
466 | |||
467 | if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != | ||
468 | (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) || | ||
469 | !(vpos >= 0 && hpos <= 0)) | ||
470 | break; | ||
471 | |||
472 | /* Sleep at least until estimated real start of hw vblank */ | ||
473 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | ||
474 | min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); | ||
475 | usleep_range(min_udelay, 2 * min_udelay); | ||
476 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | ||
477 | }; | ||
478 | |||
440 | /* do the flip (mmio) */ | 479 | /* do the flip (mmio) */ |
441 | radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base); | 480 | radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base); |
442 | 481 | ||
@@ -1768,6 +1807,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
1768 | * \param dev Device to query. | 1807 | * \param dev Device to query. |
1769 | * \param crtc Crtc to query. | 1808 | * \param crtc Crtc to query. |
1770 | * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). | 1809 | * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). |
1810 | * For driver internal use only also supports these flags: | ||
1811 | * | ||
1812 | * USE_REAL_VBLANKSTART to use the real start of vblank instead | ||
1813 | * of a fudged earlier start of vblank. | ||
1814 | * | ||
1815 | * GET_DISTANCE_TO_VBLANKSTART to return distance to the | ||
1816 | * fudged earlier start of vblank in *vpos and the distance | ||
1817 | * to true start of vblank in *hpos. | ||
1818 | * | ||
1771 | * \param *vpos Location where vertical scanout position should be stored. | 1819 | * \param *vpos Location where vertical scanout position should be stored. |
1772 | * \param *hpos Location where horizontal scanout position should go. | 1820 | * \param *hpos Location where horizontal scanout position should go. |
1773 | * \param *stime Target location for timestamp taken immediately before | 1821 | * \param *stime Target location for timestamp taken immediately before |
@@ -1911,10 +1959,40 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, | |||
1911 | vbl_end = 0; | 1959 | vbl_end = 0; |
1912 | } | 1960 | } |
1913 | 1961 | ||
1962 | /* Called from driver internal vblank counter query code? */ | ||
1963 | if (flags & GET_DISTANCE_TO_VBLANKSTART) { | ||
1964 | /* Caller wants distance from real vbl_start in *hpos */ | ||
1965 | *hpos = *vpos - vbl_start; | ||
1966 | } | ||
1967 | |||
1968 | /* Fudge vblank to start a few scanlines earlier to handle the | ||
1969 | * problem that vblank irqs fire a few scanlines before start | ||
1970 | * of vblank. Some driver internal callers need the true vblank | ||
1971 | * start to be used and signal this via the USE_REAL_VBLANKSTART flag. | ||
1972 | * | ||
1973 | * The cause of the "early" vblank irq is that the irq is triggered | ||
1974 | * by the line buffer logic when the line buffer read position enters | ||
1975 | * the vblank, whereas our crtc scanout position naturally lags the | ||
1976 | * line buffer read position. | ||
1977 | */ | ||
1978 | if (!(flags & USE_REAL_VBLANKSTART)) | ||
1979 | vbl_start -= rdev->mode_info.crtcs[pipe]->lb_vblank_lead_lines; | ||
1980 | |||
1914 | /* Test scanout position against vblank region. */ | 1981 | /* Test scanout position against vblank region. */ |
1915 | if ((*vpos < vbl_start) && (*vpos >= vbl_end)) | 1982 | if ((*vpos < vbl_start) && (*vpos >= vbl_end)) |
1916 | in_vbl = false; | 1983 | in_vbl = false; |
1917 | 1984 | ||
1985 | /* In vblank? */ | ||
1986 | if (in_vbl) | ||
1987 | ret |= DRM_SCANOUTPOS_IN_VBLANK; | ||
1988 | |||
1989 | /* Called from driver internal vblank counter query code? */ | ||
1990 | if (flags & GET_DISTANCE_TO_VBLANKSTART) { | ||
1991 | /* Caller wants distance from fudged earlier vbl_start */ | ||
1992 | *vpos -= vbl_start; | ||
1993 | return ret; | ||
1994 | } | ||
1995 | |||
1918 | /* Check if inside vblank area and apply corrective offsets: | 1996 | /* Check if inside vblank area and apply corrective offsets: |
1919 | * vpos will then be >=0 in video scanout area, but negative | 1997 | * vpos will then be >=0 in video scanout area, but negative |
1920 | * within vblank area, counting down the number of lines until | 1998 | * within vblank area, counting down the number of lines until |
@@ -1930,31 +2008,5 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, | |||
1930 | /* Correct for shifted end of vbl at vbl_end. */ | 2008 | /* Correct for shifted end of vbl at vbl_end. */ |
1931 | *vpos = *vpos - vbl_end; | 2009 | *vpos = *vpos - vbl_end; |
1932 | 2010 | ||
1933 | /* In vblank? */ | ||
1934 | if (in_vbl) | ||
1935 | ret |= DRM_SCANOUTPOS_IN_VBLANK; | ||
1936 | |||
1937 | /* Is vpos outside nominal vblank area, but less than | ||
1938 | * 1/100 of a frame height away from start of vblank? | ||
1939 | * If so, assume this isn't a massively delayed vblank | ||
1940 | * interrupt, but a vblank interrupt that fired a few | ||
1941 | * microseconds before true start of vblank. Compensate | ||
1942 | * by adding a full frame duration to the final timestamp. | ||
1943 | * Happens, e.g., on ATI R500, R600. | ||
1944 | * | ||
1945 | * We only do this if DRM_CALLED_FROM_VBLIRQ. | ||
1946 | */ | ||
1947 | if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) { | ||
1948 | vbl_start = mode->crtc_vdisplay; | ||
1949 | vtotal = mode->crtc_vtotal; | ||
1950 | |||
1951 | if (vbl_start - *vpos < vtotal / 100) { | ||
1952 | *vpos -= vtotal; | ||
1953 | |||
1954 | /* Signal this correction as "applied". */ | ||
1955 | ret |= 0x8; | ||
1956 | } | ||
1957 | } | ||
1958 | |||
1959 | return ret; | 2011 | return ret; |
1960 | } | 2012 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 171d3e43c30c..979f3bf65f2c 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
@@ -74,7 +74,7 @@ irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg) | |||
74 | static void radeon_hotplug_work_func(struct work_struct *work) | 74 | static void radeon_hotplug_work_func(struct work_struct *work) |
75 | { | 75 | { |
76 | struct radeon_device *rdev = container_of(work, struct radeon_device, | 76 | struct radeon_device *rdev = container_of(work, struct radeon_device, |
77 | hotplug_work); | 77 | hotplug_work.work); |
78 | struct drm_device *dev = rdev->ddev; | 78 | struct drm_device *dev = rdev->ddev; |
79 | struct drm_mode_config *mode_config = &dev->mode_config; | 79 | struct drm_mode_config *mode_config = &dev->mode_config; |
80 | struct drm_connector *connector; | 80 | struct drm_connector *connector; |
@@ -302,7 +302,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev) | |||
302 | } | 302 | } |
303 | } | 303 | } |
304 | 304 | ||
305 | INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); | 305 | INIT_DELAYED_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); |
306 | INIT_WORK(&rdev->dp_work, radeon_dp_work_func); | 306 | INIT_WORK(&rdev->dp_work, radeon_dp_work_func); |
307 | INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); | 307 | INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); |
308 | 308 | ||
@@ -310,7 +310,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev) | |||
310 | r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq); | 310 | r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq); |
311 | if (r) { | 311 | if (r) { |
312 | rdev->irq.installed = false; | 312 | rdev->irq.installed = false; |
313 | flush_work(&rdev->hotplug_work); | 313 | flush_delayed_work(&rdev->hotplug_work); |
314 | return r; | 314 | return r; |
315 | } | 315 | } |
316 | 316 | ||
@@ -333,7 +333,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev) | |||
333 | rdev->irq.installed = false; | 333 | rdev->irq.installed = false; |
334 | if (rdev->msi_enabled) | 334 | if (rdev->msi_enabled) |
335 | pci_disable_msi(rdev->pdev); | 335 | pci_disable_msi(rdev->pdev); |
336 | flush_work(&rdev->hotplug_work); | 336 | flush_delayed_work(&rdev->hotplug_work); |
337 | } | 337 | } |
338 | } | 338 | } |
339 | 339 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 0ec6fcca16d3..d290a8a09036 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -755,6 +755,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev, | |||
755 | */ | 755 | */ |
756 | u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) | 756 | u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) |
757 | { | 757 | { |
758 | int vpos, hpos, stat; | ||
759 | u32 count; | ||
758 | struct radeon_device *rdev = dev->dev_private; | 760 | struct radeon_device *rdev = dev->dev_private; |
759 | 761 | ||
760 | if (crtc < 0 || crtc >= rdev->num_crtc) { | 762 | if (crtc < 0 || crtc >= rdev->num_crtc) { |
@@ -762,7 +764,53 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) | |||
762 | return -EINVAL; | 764 | return -EINVAL; |
763 | } | 765 | } |
764 | 766 | ||
765 | return radeon_get_vblank_counter(rdev, crtc); | 767 | /* The hw increments its frame counter at start of vsync, not at start |
768 | * of vblank, as is required by DRM core vblank counter handling. | ||
769 | * Cook the hw count here to make it appear to the caller as if it | ||
770 | * incremented at start of vblank. We measure distance to start of | ||
771 | * vblank in vpos. vpos therefore will be >= 0 between start of vblank | ||
772 | * and start of vsync, so vpos >= 0 means to bump the hw frame counter | ||
773 | * result by 1 to give the proper appearance to caller. | ||
774 | */ | ||
775 | if (rdev->mode_info.crtcs[crtc]) { | ||
776 | /* Repeat readout if needed to provide stable result if | ||
777 | * we cross start of vsync during the queries. | ||
778 | */ | ||
779 | do { | ||
780 | count = radeon_get_vblank_counter(rdev, crtc); | ||
781 | /* Ask radeon_get_crtc_scanoutpos to return vpos as | ||
782 | * distance to start of vblank, instead of regular | ||
783 | * vertical scanout pos. | ||
784 | */ | ||
785 | stat = radeon_get_crtc_scanoutpos( | ||
786 | dev, crtc, GET_DISTANCE_TO_VBLANKSTART, | ||
787 | &vpos, &hpos, NULL, NULL, | ||
788 | &rdev->mode_info.crtcs[crtc]->base.hwmode); | ||
789 | } while (count != radeon_get_vblank_counter(rdev, crtc)); | ||
790 | |||
791 | if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != | ||
792 | (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) { | ||
793 | DRM_DEBUG_VBL("Query failed! stat %d\n", stat); | ||
794 | } | ||
795 | else { | ||
796 | DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n", | ||
797 | crtc, vpos); | ||
798 | |||
799 | /* Bump counter if we are at >= leading edge of vblank, | ||
800 | * but before vsync where vpos would turn negative and | ||
801 | * the hw counter really increments. | ||
802 | */ | ||
803 | if (vpos >= 0) | ||
804 | count++; | ||
805 | } | ||
806 | } | ||
807 | else { | ||
808 | /* Fallback to use value as is. */ | ||
809 | count = radeon_get_vblank_counter(rdev, crtc); | ||
810 | DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n"); | ||
811 | } | ||
812 | |||
813 | return count; | ||
766 | } | 814 | } |
767 | 815 | ||
768 | /** | 816 | /** |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 830e171c3a9e..bba112628b47 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -367,6 +367,7 @@ struct radeon_crtc { | |||
367 | u32 line_time; | 367 | u32 line_time; |
368 | u32 wm_low; | 368 | u32 wm_low; |
369 | u32 wm_high; | 369 | u32 wm_high; |
370 | u32 lb_vblank_lead_lines; | ||
370 | struct drm_display_mode hw_mode; | 371 | struct drm_display_mode hw_mode; |
371 | enum radeon_output_csc output_csc; | 372 | enum radeon_output_csc output_csc; |
372 | }; | 373 | }; |
@@ -553,6 +554,7 @@ struct radeon_connector { | |||
553 | void *con_priv; | 554 | void *con_priv; |
554 | bool dac_load_detect; | 555 | bool dac_load_detect; |
555 | bool detected_by_load; /* if the connection status was determined by load */ | 556 | bool detected_by_load; /* if the connection status was determined by load */ |
557 | bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */ | ||
556 | uint16_t connector_object_id; | 558 | uint16_t connector_object_id; |
557 | struct radeon_hpd hpd; | 559 | struct radeon_hpd hpd; |
558 | struct radeon_router router; | 560 | struct radeon_router router; |
@@ -686,6 +688,9 @@ struct atom_voltage_table | |||
686 | struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES]; | 688 | struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES]; |
687 | }; | 689 | }; |
688 | 690 | ||
691 | /* Driver internal use only flags of radeon_get_crtc_scanoutpos() */ | ||
692 | #define USE_REAL_VBLANKSTART (1 << 30) | ||
693 | #define GET_DISTANCE_TO_VBLANKSTART (1 << 31) | ||
689 | 694 | ||
690 | extern void | 695 | extern void |
691 | radeon_add_atom_connector(struct drm_device *dev, | 696 | radeon_add_atom_connector(struct drm_device *dev, |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index f4f03dcc1530..59abebd6b5dc 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -1756,7 +1756,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev) | |||
1756 | */ | 1756 | */ |
1757 | for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { | 1757 | for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { |
1758 | if (rdev->pm.active_crtcs & (1 << crtc)) { | 1758 | if (rdev->pm.active_crtcs & (1 << crtc)) { |
1759 | vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, | 1759 | vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, |
1760 | crtc, | ||
1761 | USE_REAL_VBLANKSTART, | ||
1760 | &vpos, &hpos, NULL, NULL, | 1762 | &vpos, &hpos, NULL, NULL, |
1761 | &rdev->mode_info.crtcs[crtc]->base.hwmode); | 1763 | &rdev->mode_info.crtcs[crtc]->base.hwmode); |
1762 | if ((vbl_status & DRM_SCANOUTPOS_VALID) && | 1764 | if ((vbl_status & DRM_SCANOUTPOS_VALID) && |
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index 574f62bbd215..7eb1ae758906 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c | |||
@@ -361,31 +361,31 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring, | |||
361 | 361 | ||
362 | /* stitch together an VCE create msg */ | 362 | /* stitch together an VCE create msg */ |
363 | ib.length_dw = 0; | 363 | ib.length_dw = 0; |
364 | ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ | 364 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */ |
365 | ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ | 365 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */ |
366 | ib.ptr[ib.length_dw++] = handle; | 366 | ib.ptr[ib.length_dw++] = cpu_to_le32(handle); |
367 | 367 | ||
368 | ib.ptr[ib.length_dw++] = 0x00000030; /* len */ | 368 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000030); /* len */ |
369 | ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */ | 369 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x01000001); /* create cmd */ |
370 | ib.ptr[ib.length_dw++] = 0x00000000; | 370 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000); |
371 | ib.ptr[ib.length_dw++] = 0x00000042; | 371 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000042); |
372 | ib.ptr[ib.length_dw++] = 0x0000000a; | 372 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000a); |
373 | ib.ptr[ib.length_dw++] = 0x00000001; | 373 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); |
374 | ib.ptr[ib.length_dw++] = 0x00000080; | 374 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000080); |
375 | ib.ptr[ib.length_dw++] = 0x00000060; | 375 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000060); |
376 | ib.ptr[ib.length_dw++] = 0x00000100; | 376 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100); |
377 | ib.ptr[ib.length_dw++] = 0x00000100; | 377 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000100); |
378 | ib.ptr[ib.length_dw++] = 0x0000000c; | 378 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); |
379 | ib.ptr[ib.length_dw++] = 0x00000000; | 379 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000000); |
380 | 380 | ||
381 | ib.ptr[ib.length_dw++] = 0x00000014; /* len */ | 381 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */ |
382 | ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ | 382 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */ |
383 | ib.ptr[ib.length_dw++] = upper_32_bits(dummy); | 383 | ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy)); |
384 | ib.ptr[ib.length_dw++] = dummy; | 384 | ib.ptr[ib.length_dw++] = cpu_to_le32(dummy); |
385 | ib.ptr[ib.length_dw++] = 0x00000001; | 385 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); |
386 | 386 | ||
387 | for (i = ib.length_dw; i < ib_size_dw; ++i) | 387 | for (i = ib.length_dw; i < ib_size_dw; ++i) |
388 | ib.ptr[i] = 0x0; | 388 | ib.ptr[i] = cpu_to_le32(0x0); |
389 | 389 | ||
390 | r = radeon_ib_schedule(rdev, &ib, NULL, false); | 390 | r = radeon_ib_schedule(rdev, &ib, NULL, false); |
391 | if (r) { | 391 | if (r) { |
@@ -428,21 +428,21 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, | |||
428 | 428 | ||
429 | /* stitch together an VCE destroy msg */ | 429 | /* stitch together an VCE destroy msg */ |
430 | ib.length_dw = 0; | 430 | ib.length_dw = 0; |
431 | ib.ptr[ib.length_dw++] = 0x0000000c; /* len */ | 431 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x0000000c); /* len */ |
432 | ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */ | 432 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); /* session cmd */ |
433 | ib.ptr[ib.length_dw++] = handle; | 433 | ib.ptr[ib.length_dw++] = cpu_to_le32(handle); |
434 | 434 | ||
435 | ib.ptr[ib.length_dw++] = 0x00000014; /* len */ | 435 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000014); /* len */ |
436 | ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */ | 436 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x05000005); /* feedback buffer */ |
437 | ib.ptr[ib.length_dw++] = upper_32_bits(dummy); | 437 | ib.ptr[ib.length_dw++] = cpu_to_le32(upper_32_bits(dummy)); |
438 | ib.ptr[ib.length_dw++] = dummy; | 438 | ib.ptr[ib.length_dw++] = cpu_to_le32(dummy); |
439 | ib.ptr[ib.length_dw++] = 0x00000001; | 439 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000001); |
440 | 440 | ||
441 | ib.ptr[ib.length_dw++] = 0x00000008; /* len */ | 441 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x00000008); /* len */ |
442 | ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */ | 442 | ib.ptr[ib.length_dw++] = cpu_to_le32(0x02000001); /* destroy cmd */ |
443 | 443 | ||
444 | for (i = ib.length_dw; i < ib_size_dw; ++i) | 444 | for (i = ib.length_dw; i < ib_size_dw; ++i) |
445 | ib.ptr[i] = 0x0; | 445 | ib.ptr[i] = cpu_to_le32(0x0); |
446 | 446 | ||
447 | r = radeon_ib_schedule(rdev, &ib, NULL, false); | 447 | r = radeon_ib_schedule(rdev, &ib, NULL, false); |
448 | if (r) { | 448 | if (r) { |
@@ -699,12 +699,12 @@ bool radeon_vce_semaphore_emit(struct radeon_device *rdev, | |||
699 | { | 699 | { |
700 | uint64_t addr = semaphore->gpu_addr; | 700 | uint64_t addr = semaphore->gpu_addr; |
701 | 701 | ||
702 | radeon_ring_write(ring, VCE_CMD_SEMAPHORE); | 702 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_SEMAPHORE)); |
703 | radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); | 703 | radeon_ring_write(ring, cpu_to_le32((addr >> 3) & 0x000FFFFF)); |
704 | radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); | 704 | radeon_ring_write(ring, cpu_to_le32((addr >> 23) & 0x000FFFFF)); |
705 | radeon_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0)); | 705 | radeon_ring_write(ring, cpu_to_le32(0x01003000 | (emit_wait ? 1 : 0))); |
706 | if (!emit_wait) | 706 | if (!emit_wait) |
707 | radeon_ring_write(ring, VCE_CMD_END); | 707 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END)); |
708 | 708 | ||
709 | return true; | 709 | return true; |
710 | } | 710 | } |
@@ -719,10 +719,10 @@ bool radeon_vce_semaphore_emit(struct radeon_device *rdev, | |||
719 | void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | 719 | void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) |
720 | { | 720 | { |
721 | struct radeon_ring *ring = &rdev->ring[ib->ring]; | 721 | struct radeon_ring *ring = &rdev->ring[ib->ring]; |
722 | radeon_ring_write(ring, VCE_CMD_IB); | 722 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_IB)); |
723 | radeon_ring_write(ring, ib->gpu_addr); | 723 | radeon_ring_write(ring, cpu_to_le32(ib->gpu_addr)); |
724 | radeon_ring_write(ring, upper_32_bits(ib->gpu_addr)); | 724 | radeon_ring_write(ring, cpu_to_le32(upper_32_bits(ib->gpu_addr))); |
725 | radeon_ring_write(ring, ib->length_dw); | 725 | radeon_ring_write(ring, cpu_to_le32(ib->length_dw)); |
726 | } | 726 | } |
727 | 727 | ||
728 | /** | 728 | /** |
@@ -738,12 +738,12 @@ void radeon_vce_fence_emit(struct radeon_device *rdev, | |||
738 | struct radeon_ring *ring = &rdev->ring[fence->ring]; | 738 | struct radeon_ring *ring = &rdev->ring[fence->ring]; |
739 | uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr; | 739 | uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr; |
740 | 740 | ||
741 | radeon_ring_write(ring, VCE_CMD_FENCE); | 741 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_FENCE)); |
742 | radeon_ring_write(ring, addr); | 742 | radeon_ring_write(ring, cpu_to_le32(addr)); |
743 | radeon_ring_write(ring, upper_32_bits(addr)); | 743 | radeon_ring_write(ring, cpu_to_le32(upper_32_bits(addr))); |
744 | radeon_ring_write(ring, fence->seq); | 744 | radeon_ring_write(ring, cpu_to_le32(fence->seq)); |
745 | radeon_ring_write(ring, VCE_CMD_TRAP); | 745 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_TRAP)); |
746 | radeon_ring_write(ring, VCE_CMD_END); | 746 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END)); |
747 | } | 747 | } |
748 | 748 | ||
749 | /** | 749 | /** |
@@ -765,7 +765,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) | |||
765 | ring->idx, r); | 765 | ring->idx, r); |
766 | return r; | 766 | return r; |
767 | } | 767 | } |
768 | radeon_ring_write(ring, VCE_CMD_END); | 768 | radeon_ring_write(ring, cpu_to_le32(VCE_CMD_END)); |
769 | radeon_ring_unlock_commit(rdev, ring, false); | 769 | radeon_ring_unlock_commit(rdev, ring, false); |
770 | 770 | ||
771 | for (i = 0; i < rdev->usec_timeout; i++) { | 771 | for (i = 0; i < rdev->usec_timeout; i++) { |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 97a904835759..6244f4e44e9a 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -813,7 +813,7 @@ int rs600_irq_process(struct radeon_device *rdev) | |||
813 | status = rs600_irq_ack(rdev); | 813 | status = rs600_irq_ack(rdev); |
814 | } | 814 | } |
815 | if (queue_hotplug) | 815 | if (queue_hotplug) |
816 | schedule_work(&rdev->hotplug_work); | 816 | schedule_delayed_work(&rdev->hotplug_work, 0); |
817 | if (queue_hdmi) | 817 | if (queue_hdmi) |
818 | schedule_work(&rdev->audio_work); | 818 | schedule_work(&rdev->audio_work); |
819 | if (rdev->msi_enabled) { | 819 | if (rdev->msi_enabled) { |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 516ca27cfa12..6bc44c24e837 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -207,6 +207,9 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev, | |||
207 | { | 207 | { |
208 | u32 tmp; | 208 | u32 tmp; |
209 | 209 | ||
210 | /* Guess line buffer size to be 8192 pixels */ | ||
211 | u32 lb_size = 8192; | ||
212 | |||
210 | /* | 213 | /* |
211 | * Line Buffer Setup | 214 | * Line Buffer Setup |
212 | * There is a single line buffer shared by both display controllers. | 215 | * There is a single line buffer shared by both display controllers. |
@@ -243,6 +246,13 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev, | |||
243 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | 246 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; |
244 | } | 247 | } |
245 | WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp); | 248 | WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp); |
249 | |||
250 | /* Save number of lines the linebuffer leads before the scanout */ | ||
251 | if (mode1) | ||
252 | rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay); | ||
253 | |||
254 | if (mode2) | ||
255 | rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay); | ||
246 | } | 256 | } |
247 | 257 | ||
248 | struct rs690_watermark { | 258 | struct rs690_watermark { |
diff --git a/drivers/gpu/drm/radeon/rv730_dpm.c b/drivers/gpu/drm/radeon/rv730_dpm.c index 3f5e1cf138ba..d37ba2cb886e 100644 --- a/drivers/gpu/drm/radeon/rv730_dpm.c +++ b/drivers/gpu/drm/radeon/rv730_dpm.c | |||
@@ -464,7 +464,7 @@ void rv730_stop_dpm(struct radeon_device *rdev) | |||
464 | result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); | 464 | result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); |
465 | 465 | ||
466 | if (result != PPSMC_Result_OK) | 466 | if (result != PPSMC_Result_OK) |
467 | DRM_ERROR("Could not force DPM to low\n"); | 467 | DRM_DEBUG("Could not force DPM to low\n"); |
468 | 468 | ||
469 | WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); | 469 | WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); |
470 | 470 | ||
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index b9c770745a7a..e830c8935db0 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c | |||
@@ -193,7 +193,7 @@ void rv770_stop_dpm(struct radeon_device *rdev) | |||
193 | result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); | 193 | result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled); |
194 | 194 | ||
195 | if (result != PPSMC_Result_OK) | 195 | if (result != PPSMC_Result_OK) |
196 | DRM_ERROR("Could not force DPM to low.\n"); | 196 | DRM_DEBUG("Could not force DPM to low.\n"); |
197 | 197 | ||
198 | WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); | 198 | WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); |
199 | 199 | ||
@@ -1418,7 +1418,7 @@ int rv770_resume_smc(struct radeon_device *rdev) | |||
1418 | int rv770_set_sw_state(struct radeon_device *rdev) | 1418 | int rv770_set_sw_state(struct radeon_device *rdev) |
1419 | { | 1419 | { |
1420 | if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK) | 1420 | if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK) |
1421 | return -EINVAL; | 1421 | DRM_DEBUG("rv770_set_sw_state failed\n"); |
1422 | return 0; | 1422 | return 0; |
1423 | } | 1423 | } |
1424 | 1424 | ||
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 07037e32dea3..f878d6962da5 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -2376,6 +2376,9 @@ static void dce6_program_watermarks(struct radeon_device *rdev, | |||
2376 | c.full = dfixed_div(c, a); | 2376 | c.full = dfixed_div(c, a); |
2377 | priority_b_mark = dfixed_trunc(c); | 2377 | priority_b_mark = dfixed_trunc(c); |
2378 | priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; | 2378 | priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; |
2379 | |||
2380 | /* Save number of lines the linebuffer leads before the scanout */ | ||
2381 | radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); | ||
2379 | } | 2382 | } |
2380 | 2383 | ||
2381 | /* select wm A */ | 2384 | /* select wm A */ |
@@ -6848,7 +6851,7 @@ restart_ih: | |||
6848 | if (queue_dp) | 6851 | if (queue_dp) |
6849 | schedule_work(&rdev->dp_work); | 6852 | schedule_work(&rdev->dp_work); |
6850 | if (queue_hotplug) | 6853 | if (queue_hotplug) |
6851 | schedule_work(&rdev->hotplug_work); | 6854 | schedule_delayed_work(&rdev->hotplug_work, 0); |
6852 | if (queue_thermal && rdev->pm.dpm_enabled) | 6855 | if (queue_thermal && rdev->pm.dpm_enabled) |
6853 | schedule_work(&rdev->pm.dpm.thermal.work); | 6856 | schedule_work(&rdev->pm.dpm.thermal.work); |
6854 | rdev->ih.rptr = rptr; | 6857 | rdev->ih.rptr = rptr; |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 8caea0a33dd8..d908321b94ce 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c | |||
@@ -67,6 +67,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, | |||
67 | * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). | 67 | * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). |
68 | */ | 68 | */ |
69 | vma->vm_flags &= ~VM_PFNMAP; | 69 | vma->vm_flags &= ~VM_PFNMAP; |
70 | vma->vm_pgoff = 0; | ||
70 | 71 | ||
71 | ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, | 72 | ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, |
72 | obj->size, &rk_obj->dma_attrs); | 73 | obj->size, &rk_obj->dma_attrs); |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 5d8ae5e49c44..03c47eeadc81 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c | |||
@@ -374,6 +374,7 @@ static const struct of_device_id vop_driver_dt_match[] = { | |||
374 | .data = &rk3288_vop }, | 374 | .data = &rk3288_vop }, |
375 | {}, | 375 | {}, |
376 | }; | 376 | }; |
377 | MODULE_DEVICE_TABLE(of, vop_driver_dt_match); | ||
377 | 378 | ||
378 | static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v) | 379 | static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v) |
379 | { | 380 | { |
@@ -959,8 +960,8 @@ static int vop_update_plane_event(struct drm_plane *plane, | |||
959 | val = (dest.y2 - dest.y1 - 1) << 16; | 960 | val = (dest.y2 - dest.y1 - 1) << 16; |
960 | val |= (dest.x2 - dest.x1 - 1) & 0xffff; | 961 | val |= (dest.x2 - dest.x1 - 1) & 0xffff; |
961 | VOP_WIN_SET(vop, win, dsp_info, val); | 962 | VOP_WIN_SET(vop, win, dsp_info, val); |
962 | val = (dsp_sty - 1) << 16; | 963 | val = dsp_sty << 16; |
963 | val |= (dsp_stx - 1) & 0xffff; | 964 | val |= dsp_stx & 0xffff; |
964 | VOP_WIN_SET(vop, win, dsp_st, val); | 965 | VOP_WIN_SET(vop, win, dsp_st, val); |
965 | VOP_WIN_SET(vop, win, rb_swap, rb_swap); | 966 | VOP_WIN_SET(vop, win, rb_swap, rb_swap); |
966 | 967 | ||
@@ -1289,7 +1290,7 @@ static void vop_win_state_complete(struct vop_win *vop_win, | |||
1289 | 1290 | ||
1290 | if (state->event) { | 1291 | if (state->event) { |
1291 | spin_lock_irqsave(&drm->event_lock, flags); | 1292 | spin_lock_irqsave(&drm->event_lock, flags); |
1292 | drm_send_vblank_event(drm, -1, state->event); | 1293 | drm_crtc_send_vblank_event(crtc, state->event); |
1293 | spin_unlock_irqrestore(&drm->event_lock, flags); | 1294 | spin_unlock_irqrestore(&drm->event_lock, flags); |
1294 | } | 1295 | } |
1295 | 1296 | ||
@@ -1575,32 +1576,25 @@ static int vop_initial(struct vop *vop) | |||
1575 | return PTR_ERR(vop->dclk); | 1576 | return PTR_ERR(vop->dclk); |
1576 | } | 1577 | } |
1577 | 1578 | ||
1578 | ret = clk_prepare(vop->hclk); | ||
1579 | if (ret < 0) { | ||
1580 | dev_err(vop->dev, "failed to prepare hclk\n"); | ||
1581 | return ret; | ||
1582 | } | ||
1583 | |||
1584 | ret = clk_prepare(vop->dclk); | 1579 | ret = clk_prepare(vop->dclk); |
1585 | if (ret < 0) { | 1580 | if (ret < 0) { |
1586 | dev_err(vop->dev, "failed to prepare dclk\n"); | 1581 | dev_err(vop->dev, "failed to prepare dclk\n"); |
1587 | goto err_unprepare_hclk; | 1582 | return ret; |
1588 | } | 1583 | } |
1589 | 1584 | ||
1590 | ret = clk_prepare(vop->aclk); | 1585 | /* Enable both the hclk and aclk to setup the vop */ |
1586 | ret = clk_prepare_enable(vop->hclk); | ||
1591 | if (ret < 0) { | 1587 | if (ret < 0) { |
1592 | dev_err(vop->dev, "failed to prepare aclk\n"); | 1588 | dev_err(vop->dev, "failed to prepare/enable hclk\n"); |
1593 | goto err_unprepare_dclk; | 1589 | goto err_unprepare_dclk; |
1594 | } | 1590 | } |
1595 | 1591 | ||
1596 | /* | 1592 | ret = clk_prepare_enable(vop->aclk); |
1597 | * enable hclk, so that we can config vop register. | ||
1598 | */ | ||
1599 | ret = clk_enable(vop->hclk); | ||
1600 | if (ret < 0) { | 1593 | if (ret < 0) { |
1601 | dev_err(vop->dev, "failed to prepare aclk\n"); | 1594 | dev_err(vop->dev, "failed to prepare/enable aclk\n"); |
1602 | goto err_unprepare_aclk; | 1595 | goto err_disable_hclk; |
1603 | } | 1596 | } |
1597 | |||
1604 | /* | 1598 | /* |
1605 | * do hclk_reset, reset all vop registers. | 1599 | * do hclk_reset, reset all vop registers. |
1606 | */ | 1600 | */ |
@@ -1608,7 +1602,7 @@ static int vop_initial(struct vop *vop) | |||
1608 | if (IS_ERR(ahb_rst)) { | 1602 | if (IS_ERR(ahb_rst)) { |
1609 | dev_err(vop->dev, "failed to get ahb reset\n"); | 1603 | dev_err(vop->dev, "failed to get ahb reset\n"); |
1610 | ret = PTR_ERR(ahb_rst); | 1604 | ret = PTR_ERR(ahb_rst); |
1611 | goto err_disable_hclk; | 1605 | goto err_disable_aclk; |
1612 | } | 1606 | } |
1613 | reset_control_assert(ahb_rst); | 1607 | reset_control_assert(ahb_rst); |
1614 | usleep_range(10, 20); | 1608 | usleep_range(10, 20); |
@@ -1634,26 +1628,25 @@ static int vop_initial(struct vop *vop) | |||
1634 | if (IS_ERR(vop->dclk_rst)) { | 1628 | if (IS_ERR(vop->dclk_rst)) { |
1635 | dev_err(vop->dev, "failed to get dclk reset\n"); | 1629 | dev_err(vop->dev, "failed to get dclk reset\n"); |
1636 | ret = PTR_ERR(vop->dclk_rst); | 1630 | ret = PTR_ERR(vop->dclk_rst); |
1637 | goto err_unprepare_aclk; | 1631 | goto err_disable_aclk; |
1638 | } | 1632 | } |
1639 | reset_control_assert(vop->dclk_rst); | 1633 | reset_control_assert(vop->dclk_rst); |
1640 | usleep_range(10, 20); | 1634 | usleep_range(10, 20); |
1641 | reset_control_deassert(vop->dclk_rst); | 1635 | reset_control_deassert(vop->dclk_rst); |
1642 | 1636 | ||
1643 | clk_disable(vop->hclk); | 1637 | clk_disable(vop->hclk); |
1638 | clk_disable(vop->aclk); | ||
1644 | 1639 | ||
1645 | vop->is_enabled = false; | 1640 | vop->is_enabled = false; |
1646 | 1641 | ||
1647 | return 0; | 1642 | return 0; |
1648 | 1643 | ||
1644 | err_disable_aclk: | ||
1645 | clk_disable_unprepare(vop->aclk); | ||
1649 | err_disable_hclk: | 1646 | err_disable_hclk: |
1650 | clk_disable(vop->hclk); | 1647 | clk_disable_unprepare(vop->hclk); |
1651 | err_unprepare_aclk: | ||
1652 | clk_unprepare(vop->aclk); | ||
1653 | err_unprepare_dclk: | 1648 | err_unprepare_dclk: |
1654 | clk_unprepare(vop->dclk); | 1649 | clk_unprepare(vop->dclk); |
1655 | err_unprepare_hclk: | ||
1656 | clk_unprepare(vop->hclk); | ||
1657 | return ret; | 1650 | return ret; |
1658 | } | 1651 | } |
1659 | 1652 | ||
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c index 6a954544727f..f154fb1929bd 100644 --- a/drivers/gpu/drm/ttm/ttm_lock.c +++ b/drivers/gpu/drm/ttm/ttm_lock.c | |||
@@ -180,7 +180,7 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible) | |||
180 | spin_unlock(&lock->lock); | 180 | spin_unlock(&lock->lock); |
181 | } | 181 | } |
182 | } else | 182 | } else |
183 | wait_event(lock->queue, __ttm_read_lock(lock)); | 183 | wait_event(lock->queue, __ttm_write_lock(lock)); |
184 | 184 | ||
185 | return ret; | 185 | return ret; |
186 | } | 186 | } |
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index f545913a56c7..578fe0a9324c 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c | |||
@@ -412,7 +412,7 @@ static const struct drm_connector_funcs virtio_gpu_connector_funcs = { | |||
412 | .save = virtio_gpu_conn_save, | 412 | .save = virtio_gpu_conn_save, |
413 | .restore = virtio_gpu_conn_restore, | 413 | .restore = virtio_gpu_conn_restore, |
414 | .detect = virtio_gpu_conn_detect, | 414 | .detect = virtio_gpu_conn_detect, |
415 | .fill_modes = drm_helper_probe_single_connector_modes, | 415 | .fill_modes = drm_helper_probe_single_connector_modes_nomerge, |
416 | .destroy = virtio_gpu_conn_destroy, | 416 | .destroy = virtio_gpu_conn_destroy, |
417 | .reset = drm_atomic_helper_connector_reset, | 417 | .reset = drm_atomic_helper_connector_reset, |
418 | .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, | 418 | .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index a09cf8529b9f..c49812b80dd0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -1233,6 +1233,7 @@ static void vmw_master_drop(struct drm_device *dev, | |||
1233 | 1233 | ||
1234 | vmw_fp->locked_master = drm_master_get(file_priv->master); | 1234 | vmw_fp->locked_master = drm_master_get(file_priv->master); |
1235 | ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); | 1235 | ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); |
1236 | vmw_kms_legacy_hotspot_clear(dev_priv); | ||
1236 | if (unlikely((ret != 0))) { | 1237 | if (unlikely((ret != 0))) { |
1237 | DRM_ERROR("Unable to lock TTM at VT switch.\n"); | 1238 | DRM_ERROR("Unable to lock TTM at VT switch.\n"); |
1238 | drm_master_put(&vmw_fp->locked_master); | 1239 | drm_master_put(&vmw_fp->locked_master); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index a8ae9dfb83b7..469cdd520615 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -925,6 +925,7 @@ int vmw_kms_present(struct vmw_private *dev_priv, | |||
925 | uint32_t num_clips); | 925 | uint32_t num_clips); |
926 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | 926 | int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, |
927 | struct drm_file *file_priv); | 927 | struct drm_file *file_priv); |
928 | void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv); | ||
928 | 929 | ||
929 | int vmw_dumb_create(struct drm_file *file_priv, | 930 | int vmw_dumb_create(struct drm_file *file_priv, |
930 | struct drm_device *dev, | 931 | struct drm_device *dev, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index a8baf5f5e765..b6a0806b06bf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
@@ -390,7 +390,7 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, | |||
390 | else if (ctx_id == SVGA3D_INVALID_ID) | 390 | else if (ctx_id == SVGA3D_INVALID_ID) |
391 | ret = vmw_local_fifo_reserve(dev_priv, bytes); | 391 | ret = vmw_local_fifo_reserve(dev_priv, bytes); |
392 | else { | 392 | else { |
393 | WARN_ON("Command buffer has not been allocated.\n"); | 393 | WARN(1, "Command buffer has not been allocated.\n"); |
394 | ret = NULL; | 394 | ret = NULL; |
395 | } | 395 | } |
396 | if (IS_ERR_OR_NULL(ret)) { | 396 | if (IS_ERR_OR_NULL(ret)) { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 9fcd7f82995c..9b4bb9e74d73 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -133,13 +133,19 @@ void vmw_cursor_update_position(struct vmw_private *dev_priv, | |||
133 | vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT); | 133 | vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT); |
134 | } | 134 | } |
135 | 135 | ||
136 | int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | 136 | |
137 | uint32_t handle, uint32_t width, uint32_t height) | 137 | /* |
138 | * vmw_du_crtc_cursor_set2 - Driver cursor_set2 callback. | ||
139 | */ | ||
140 | int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, | ||
141 | uint32_t handle, uint32_t width, uint32_t height, | ||
142 | int32_t hot_x, int32_t hot_y) | ||
138 | { | 143 | { |
139 | struct vmw_private *dev_priv = vmw_priv(crtc->dev); | 144 | struct vmw_private *dev_priv = vmw_priv(crtc->dev); |
140 | struct vmw_display_unit *du = vmw_crtc_to_du(crtc); | 145 | struct vmw_display_unit *du = vmw_crtc_to_du(crtc); |
141 | struct vmw_surface *surface = NULL; | 146 | struct vmw_surface *surface = NULL; |
142 | struct vmw_dma_buffer *dmabuf = NULL; | 147 | struct vmw_dma_buffer *dmabuf = NULL; |
148 | s32 hotspot_x, hotspot_y; | ||
143 | int ret; | 149 | int ret; |
144 | 150 | ||
145 | /* | 151 | /* |
@@ -151,6 +157,8 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | |||
151 | */ | 157 | */ |
152 | drm_modeset_unlock_crtc(crtc); | 158 | drm_modeset_unlock_crtc(crtc); |
153 | drm_modeset_lock_all(dev_priv->dev); | 159 | drm_modeset_lock_all(dev_priv->dev); |
160 | hotspot_x = hot_x + du->hotspot_x; | ||
161 | hotspot_y = hot_y + du->hotspot_y; | ||
154 | 162 | ||
155 | /* A lot of the code assumes this */ | 163 | /* A lot of the code assumes this */ |
156 | if (handle && (width != 64 || height != 64)) { | 164 | if (handle && (width != 64 || height != 64)) { |
@@ -187,31 +195,34 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | |||
187 | vmw_dmabuf_unreference(&du->cursor_dmabuf); | 195 | vmw_dmabuf_unreference(&du->cursor_dmabuf); |
188 | 196 | ||
189 | /* setup new image */ | 197 | /* setup new image */ |
198 | ret = 0; | ||
190 | if (surface) { | 199 | if (surface) { |
191 | /* vmw_user_surface_lookup takes one reference */ | 200 | /* vmw_user_surface_lookup takes one reference */ |
192 | du->cursor_surface = surface; | 201 | du->cursor_surface = surface; |
193 | 202 | ||
194 | du->cursor_surface->snooper.crtc = crtc; | 203 | du->cursor_surface->snooper.crtc = crtc; |
195 | du->cursor_age = du->cursor_surface->snooper.age; | 204 | du->cursor_age = du->cursor_surface->snooper.age; |
196 | vmw_cursor_update_image(dev_priv, surface->snooper.image, | 205 | ret = vmw_cursor_update_image(dev_priv, surface->snooper.image, |
197 | 64, 64, du->hotspot_x, du->hotspot_y); | 206 | 64, 64, hotspot_x, hotspot_y); |
198 | } else if (dmabuf) { | 207 | } else if (dmabuf) { |
199 | /* vmw_user_surface_lookup takes one reference */ | 208 | /* vmw_user_surface_lookup takes one reference */ |
200 | du->cursor_dmabuf = dmabuf; | 209 | du->cursor_dmabuf = dmabuf; |
201 | 210 | ||
202 | ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height, | 211 | ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height, |
203 | du->hotspot_x, du->hotspot_y); | 212 | hotspot_x, hotspot_y); |
204 | } else { | 213 | } else { |
205 | vmw_cursor_update_position(dev_priv, false, 0, 0); | 214 | vmw_cursor_update_position(dev_priv, false, 0, 0); |
206 | ret = 0; | ||
207 | goto out; | 215 | goto out; |
208 | } | 216 | } |
209 | 217 | ||
210 | vmw_cursor_update_position(dev_priv, true, | 218 | if (!ret) { |
211 | du->cursor_x + du->hotspot_x, | 219 | vmw_cursor_update_position(dev_priv, true, |
212 | du->cursor_y + du->hotspot_y); | 220 | du->cursor_x + hotspot_x, |
221 | du->cursor_y + hotspot_y); | ||
222 | du->core_hotspot_x = hot_x; | ||
223 | du->core_hotspot_y = hot_y; | ||
224 | } | ||
213 | 225 | ||
214 | ret = 0; | ||
215 | out: | 226 | out: |
216 | drm_modeset_unlock_all(dev_priv->dev); | 227 | drm_modeset_unlock_all(dev_priv->dev); |
217 | drm_modeset_lock_crtc(crtc, crtc->cursor); | 228 | drm_modeset_lock_crtc(crtc, crtc->cursor); |
@@ -239,8 +250,10 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | |||
239 | drm_modeset_lock_all(dev_priv->dev); | 250 | drm_modeset_lock_all(dev_priv->dev); |
240 | 251 | ||
241 | vmw_cursor_update_position(dev_priv, shown, | 252 | vmw_cursor_update_position(dev_priv, shown, |
242 | du->cursor_x + du->hotspot_x, | 253 | du->cursor_x + du->hotspot_x + |
243 | du->cursor_y + du->hotspot_y); | 254 | du->core_hotspot_x, |
255 | du->cursor_y + du->hotspot_y + | ||
256 | du->core_hotspot_y); | ||
244 | 257 | ||
245 | drm_modeset_unlock_all(dev_priv->dev); | 258 | drm_modeset_unlock_all(dev_priv->dev); |
246 | drm_modeset_lock_crtc(crtc, crtc->cursor); | 259 | drm_modeset_lock_crtc(crtc, crtc->cursor); |
@@ -334,6 +347,29 @@ err_unreserve: | |||
334 | ttm_bo_unreserve(bo); | 347 | ttm_bo_unreserve(bo); |
335 | } | 348 | } |
336 | 349 | ||
350 | /** | ||
351 | * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots | ||
352 | * | ||
353 | * @dev_priv: Pointer to the device private struct. | ||
354 | * | ||
355 | * Clears all legacy hotspots. | ||
356 | */ | ||
357 | void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv) | ||
358 | { | ||
359 | struct drm_device *dev = dev_priv->dev; | ||
360 | struct vmw_display_unit *du; | ||
361 | struct drm_crtc *crtc; | ||
362 | |||
363 | drm_modeset_lock_all(dev); | ||
364 | drm_for_each_crtc(crtc, dev) { | ||
365 | du = vmw_crtc_to_du(crtc); | ||
366 | |||
367 | du->hotspot_x = 0; | ||
368 | du->hotspot_y = 0; | ||
369 | } | ||
370 | drm_modeset_unlock_all(dev); | ||
371 | } | ||
372 | |||
337 | void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) | 373 | void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) |
338 | { | 374 | { |
339 | struct drm_device *dev = dev_priv->dev; | 375 | struct drm_device *dev = dev_priv->dev; |
@@ -351,7 +387,9 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) | |||
351 | du->cursor_age = du->cursor_surface->snooper.age; | 387 | du->cursor_age = du->cursor_surface->snooper.age; |
352 | vmw_cursor_update_image(dev_priv, | 388 | vmw_cursor_update_image(dev_priv, |
353 | du->cursor_surface->snooper.image, | 389 | du->cursor_surface->snooper.image, |
354 | 64, 64, du->hotspot_x, du->hotspot_y); | 390 | 64, 64, |
391 | du->hotspot_x + du->core_hotspot_x, | ||
392 | du->hotspot_y + du->core_hotspot_y); | ||
355 | } | 393 | } |
356 | 394 | ||
357 | mutex_unlock(&dev->mode_config.mutex); | 395 | mutex_unlock(&dev->mode_config.mutex); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 782df7ca9794..edd81503516d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | |||
@@ -159,6 +159,8 @@ struct vmw_display_unit { | |||
159 | 159 | ||
160 | int hotspot_x; | 160 | int hotspot_x; |
161 | int hotspot_y; | 161 | int hotspot_y; |
162 | s32 core_hotspot_x; | ||
163 | s32 core_hotspot_y; | ||
162 | 164 | ||
163 | unsigned unit; | 165 | unsigned unit; |
164 | 166 | ||
@@ -193,8 +195,9 @@ void vmw_du_crtc_restore(struct drm_crtc *crtc); | |||
193 | void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, | 195 | void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, |
194 | u16 *r, u16 *g, u16 *b, | 196 | u16 *r, u16 *g, u16 *b, |
195 | uint32_t start, uint32_t size); | 197 | uint32_t start, uint32_t size); |
196 | int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | 198 | int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, |
197 | uint32_t handle, uint32_t width, uint32_t height); | 199 | uint32_t handle, uint32_t width, uint32_t height, |
200 | int32_t hot_x, int32_t hot_y); | ||
198 | int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); | 201 | int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); |
199 | int vmw_du_connector_dpms(struct drm_connector *connector, int mode); | 202 | int vmw_du_connector_dpms(struct drm_connector *connector, int mode); |
200 | void vmw_du_connector_save(struct drm_connector *connector); | 203 | void vmw_du_connector_save(struct drm_connector *connector); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index bb63e4d795fa..52caecb4502e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
@@ -297,7 +297,7 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set) | |||
297 | static struct drm_crtc_funcs vmw_legacy_crtc_funcs = { | 297 | static struct drm_crtc_funcs vmw_legacy_crtc_funcs = { |
298 | .save = vmw_du_crtc_save, | 298 | .save = vmw_du_crtc_save, |
299 | .restore = vmw_du_crtc_restore, | 299 | .restore = vmw_du_crtc_restore, |
300 | .cursor_set = vmw_du_crtc_cursor_set, | 300 | .cursor_set2 = vmw_du_crtc_cursor_set2, |
301 | .cursor_move = vmw_du_crtc_cursor_move, | 301 | .cursor_move = vmw_du_crtc_cursor_move, |
302 | .gamma_set = vmw_du_crtc_gamma_set, | 302 | .gamma_set = vmw_du_crtc_gamma_set, |
303 | .destroy = vmw_ldu_crtc_destroy, | 303 | .destroy = vmw_ldu_crtc_destroy, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index b96d1ab610c5..13926ff192e3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | |||
@@ -533,7 +533,7 @@ out_no_fence: | |||
533 | static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = { | 533 | static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = { |
534 | .save = vmw_du_crtc_save, | 534 | .save = vmw_du_crtc_save, |
535 | .restore = vmw_du_crtc_restore, | 535 | .restore = vmw_du_crtc_restore, |
536 | .cursor_set = vmw_du_crtc_cursor_set, | 536 | .cursor_set2 = vmw_du_crtc_cursor_set2, |
537 | .cursor_move = vmw_du_crtc_cursor_move, | 537 | .cursor_move = vmw_du_crtc_cursor_move, |
538 | .gamma_set = vmw_du_crtc_gamma_set, | 538 | .gamma_set = vmw_du_crtc_gamma_set, |
539 | .destroy = vmw_sou_crtc_destroy, | 539 | .destroy = vmw_sou_crtc_destroy, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index b1fc1c02792d..f823fc3efed7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | |||
@@ -1043,7 +1043,7 @@ out_finish: | |||
1043 | static struct drm_crtc_funcs vmw_stdu_crtc_funcs = { | 1043 | static struct drm_crtc_funcs vmw_stdu_crtc_funcs = { |
1044 | .save = vmw_du_crtc_save, | 1044 | .save = vmw_du_crtc_save, |
1045 | .restore = vmw_du_crtc_restore, | 1045 | .restore = vmw_du_crtc_restore, |
1046 | .cursor_set = vmw_du_crtc_cursor_set, | 1046 | .cursor_set2 = vmw_du_crtc_cursor_set2, |
1047 | .cursor_move = vmw_du_crtc_cursor_move, | 1047 | .cursor_move = vmw_du_crtc_cursor_move, |
1048 | .gamma_set = vmw_du_crtc_gamma_set, | 1048 | .gamma_set = vmw_du_crtc_gamma_set, |
1049 | .destroy = vmw_stdu_crtc_destroy, | 1049 | .destroy = vmw_stdu_crtc_destroy, |
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index ba47b30d28fa..f2e13eb8339f 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/irqchip/chained_irq.h> | 28 | #include <linux/irqchip/chained_irq.h> |
29 | #include <linux/irqdomain.h> | 29 | #include <linux/irqdomain.h> |
30 | #include <linux/of_device.h> | 30 | #include <linux/of_device.h> |
31 | #include <linux/of_graph.h> | ||
31 | 32 | ||
32 | #include <drm/drm_fourcc.h> | 33 | #include <drm/drm_fourcc.h> |
33 | 34 | ||
@@ -993,12 +994,26 @@ static void platform_device_unregister_children(struct platform_device *pdev) | |||
993 | struct ipu_platform_reg { | 994 | struct ipu_platform_reg { |
994 | struct ipu_client_platformdata pdata; | 995 | struct ipu_client_platformdata pdata; |
995 | const char *name; | 996 | const char *name; |
996 | int reg_offset; | ||
997 | }; | 997 | }; |
998 | 998 | ||
999 | /* These must be in the order of the corresponding device tree port nodes */ | ||
999 | static const struct ipu_platform_reg client_reg[] = { | 1000 | static const struct ipu_platform_reg client_reg[] = { |
1000 | { | 1001 | { |
1001 | .pdata = { | 1002 | .pdata = { |
1003 | .csi = 0, | ||
1004 | .dma[0] = IPUV3_CHANNEL_CSI0, | ||
1005 | .dma[1] = -EINVAL, | ||
1006 | }, | ||
1007 | .name = "imx-ipuv3-camera", | ||
1008 | }, { | ||
1009 | .pdata = { | ||
1010 | .csi = 1, | ||
1011 | .dma[0] = IPUV3_CHANNEL_CSI1, | ||
1012 | .dma[1] = -EINVAL, | ||
1013 | }, | ||
1014 | .name = "imx-ipuv3-camera", | ||
1015 | }, { | ||
1016 | .pdata = { | ||
1002 | .di = 0, | 1017 | .di = 0, |
1003 | .dc = 5, | 1018 | .dc = 5, |
1004 | .dp = IPU_DP_FLOW_SYNC_BG, | 1019 | .dp = IPU_DP_FLOW_SYNC_BG, |
@@ -1015,22 +1030,6 @@ static const struct ipu_platform_reg client_reg[] = { | |||
1015 | .dma[1] = -EINVAL, | 1030 | .dma[1] = -EINVAL, |
1016 | }, | 1031 | }, |
1017 | .name = "imx-ipuv3-crtc", | 1032 | .name = "imx-ipuv3-crtc", |
1018 | }, { | ||
1019 | .pdata = { | ||
1020 | .csi = 0, | ||
1021 | .dma[0] = IPUV3_CHANNEL_CSI0, | ||
1022 | .dma[1] = -EINVAL, | ||
1023 | }, | ||
1024 | .reg_offset = IPU_CM_CSI0_REG_OFS, | ||
1025 | .name = "imx-ipuv3-camera", | ||
1026 | }, { | ||
1027 | .pdata = { | ||
1028 | .csi = 1, | ||
1029 | .dma[0] = IPUV3_CHANNEL_CSI1, | ||
1030 | .dma[1] = -EINVAL, | ||
1031 | }, | ||
1032 | .reg_offset = IPU_CM_CSI1_REG_OFS, | ||
1033 | .name = "imx-ipuv3-camera", | ||
1034 | }, | 1033 | }, |
1035 | }; | 1034 | }; |
1036 | 1035 | ||
@@ -1051,22 +1050,30 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) | |||
1051 | for (i = 0; i < ARRAY_SIZE(client_reg); i++) { | 1050 | for (i = 0; i < ARRAY_SIZE(client_reg); i++) { |
1052 | const struct ipu_platform_reg *reg = &client_reg[i]; | 1051 | const struct ipu_platform_reg *reg = &client_reg[i]; |
1053 | struct platform_device *pdev; | 1052 | struct platform_device *pdev; |
1054 | struct resource res; | 1053 | |
1055 | 1054 | pdev = platform_device_alloc(reg->name, id++); | |
1056 | if (reg->reg_offset) { | 1055 | if (!pdev) { |
1057 | memset(&res, 0, sizeof(res)); | 1056 | ret = -ENOMEM; |
1058 | res.flags = IORESOURCE_MEM; | 1057 | goto err_register; |
1059 | res.start = ipu_base + ipu->devtype->cm_ofs + reg->reg_offset; | 1058 | } |
1060 | res.end = res.start + PAGE_SIZE - 1; | 1059 | |
1061 | pdev = platform_device_register_resndata(dev, reg->name, | 1060 | pdev->dev.parent = dev; |
1062 | id++, &res, 1, ®->pdata, sizeof(reg->pdata)); | 1061 | |
1063 | } else { | 1062 | /* Associate subdevice with the corresponding port node */ |
1064 | pdev = platform_device_register_data(dev, reg->name, | 1063 | pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, i); |
1065 | id++, ®->pdata, sizeof(reg->pdata)); | 1064 | if (!pdev->dev.of_node) { |
1065 | dev_err(dev, "missing port@%d node in %s\n", i, | ||
1066 | dev->of_node->full_name); | ||
1067 | ret = -ENODEV; | ||
1068 | goto err_register; | ||
1066 | } | 1069 | } |
1067 | 1070 | ||
1068 | if (IS_ERR(pdev)) { | 1071 | ret = platform_device_add_data(pdev, ®->pdata, |
1069 | ret = PTR_ERR(pdev); | 1072 | sizeof(reg->pdata)); |
1073 | if (!ret) | ||
1074 | ret = platform_device_add(pdev); | ||
1075 | if (ret) { | ||
1076 | platform_device_put(pdev); | ||
1070 | goto err_register; | 1077 | goto err_register; |
1071 | } | 1078 | } |
1072 | } | 1079 | } |
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index 3166e4bc4eb6..9abcaa53bd25 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c | |||
@@ -395,8 +395,10 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) | |||
395 | set_current_state(interruptible ? | 395 | set_current_state(interruptible ? |
396 | TASK_INTERRUPTIBLE : | 396 | TASK_INTERRUPTIBLE : |
397 | TASK_UNINTERRUPTIBLE); | 397 | TASK_UNINTERRUPTIBLE); |
398 | if (signal_pending(current)) { | 398 | if (interruptible && signal_pending(current)) { |
399 | rc = -EINTR; | 399 | __set_current_state(TASK_RUNNING); |
400 | remove_wait_queue(&vga_wait_queue, &wait); | ||
401 | rc = -ERESTARTSYS; | ||
400 | break; | 402 | break; |
401 | } | 403 | } |
402 | schedule(); | 404 | schedule(); |