Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h            |  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c     |   2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c   |   8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c  |   6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c        |  44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c         | 146
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c        |  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c     |   9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c    |   2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c        |  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c         |  16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c      |  25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c       |   2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c        |  16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c         |   2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c        |   2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c        |  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h       |   1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c     |  19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h     |   2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c      |   5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c       |   8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c         |  47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c      |  65
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c       |  27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_test.c       |   8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c        |   2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c      |   2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c        |  80
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c        |   8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c         |  49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_encoders.c |   3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c            |   8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c               |   3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.c            |  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_smc.c            |   6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c         |  30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c         |  32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c          |  30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_smc.c          |   4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c          |  74
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c          |  79
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c          |   8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c          |   8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_smc.c       |   2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c            |   9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_smc.c         |   4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c          |   4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c          |   4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c          |  20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c                |   6
51 files changed, 549 insertions(+), 443 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 668939a14206..6647fb26ef25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -82,6 +82,7 @@ extern int amdgpu_vm_block_size;
 extern int amdgpu_enable_scheduler;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
+extern int amdgpu_enable_semaphores;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	3000
 #define AMDGPU_MAX_USEC_TIMEOUT		100000 /* 100 ms */
@@ -432,7 +433,7 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
 void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
 
-void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 				   struct amdgpu_irq_src *irq_src,
 				   unsigned irq_type);
@@ -890,7 +891,7 @@ struct amdgpu_ring {
 	struct amdgpu_device		*adev;
 	const struct amdgpu_ring_funcs	*funcs;
 	struct amdgpu_fence_driver	fence_drv;
-	struct amd_gpu_scheduler	*scheduler;
+	struct amd_gpu_scheduler	sched;
 
 	spinlock_t		fence_lock;
 	struct mutex		*ring_lock;
@@ -1201,8 +1202,6 @@ struct amdgpu_gfx {
 	struct amdgpu_irq_src		priv_inst_irq;
 	/* gfx status */
 	uint32_t			gfx_current_status;
-	/* sync signal for const engine */
-	unsigned ce_sync_offs;
 	/* ce ram size*/
 	unsigned ce_ram_size;
 };
@@ -1274,8 +1273,10 @@ struct amdgpu_job {
 	uint32_t		num_ibs;
 	struct mutex		job_lock;
 	struct amdgpu_user_fence uf;
-	int (*free_job)(struct amdgpu_job *sched_job);
+	int (*free_job)(struct amdgpu_job *job);
 };
+#define to_amdgpu_job(sched_job)		\
+		container_of((sched_job), struct amdgpu_job, base)
 
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
 {
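Note: to_amdgpu_job() is the usual container_of() idiom for recovering a wrapping structure from an embedded member; struct amdgpu_job embeds struct amd_sched_job as its "base" field per the hunk above. A minimal sketch of how a scheduler backend callback uses it (illustrative only, mirroring amdgpu_sched_dependency() further down in this patch):

	static struct fence *example_dependency(struct amd_sched_job *sched_job)
	{
		/* sched_job points at job->base, so container_of() walks
		 * back to the enclosing struct amdgpu_job. */
		struct amdgpu_job *job = to_amdgpu_job(sched_job);

		return amdgpu_sync_get_fence(&job->ibs->sync);
	}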
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 496ed2192eba..84d68d658f8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -183,7 +183,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 		return -ENOMEM;
 
 	r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
-			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo);
+			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
 	if (r) {
 		dev_err(rdev->dev,
 			"failed to allocate BO for amdkfd (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 77f1d7c6ea3a..9416e0f5c1db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -672,8 +672,12 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
 		/* disp clock */
 		adev->clock.default_dispclk =
 			le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
-		if (adev->clock.default_dispclk == 0)
-			adev->clock.default_dispclk = 54000; /* 540 Mhz */
+		/* set a reasonable default for DP */
+		if (adev->clock.default_dispclk < 53900) {
+			DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
+				 adev->clock.default_dispclk / 100);
+			adev->clock.default_dispclk = 60000;
+		}
 		adev->clock.dp_extclk =
 			le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
 		adev->clock.current_dispclk = adev->clock.default_dispclk;
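Note: these ATOM clock fields are stored in units of 10 kHz, as the removed "54000; /* 540 Mhz */" comment shows, so the 53900 threshold corresponds to 539 MHz and the new 60000 default to 600 MHz; the division by 100 in the DRM_INFO message converts 10 kHz units to MHz.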
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 98d59ee640ce..cd639c362df3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -79,7 +79,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 	int time;
 
 	n = AMDGPU_BENCHMARK_ITERATIONS;
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
+			     NULL, &sobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -91,7 +92,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 	if (r) {
 		goto out_cleanup;
 	}
-	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
+	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
+			     NULL, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 6b1243f9f86d..8e995148f56e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -86,7 +86,7 @@ static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
 
 	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
 	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
-			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
+			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
 	if (ret)
 		return ret;
 	ret = amdgpu_bo_reserve(bo, false);
@@ -197,7 +197,8 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
 
 	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
 					  true, domain, flags,
-					  NULL, &placement, &obj);
+					  NULL, &placement, NULL,
+					  &obj);
 	if (ret) {
 		DRM_ERROR("(%d) bo create failed\n", ret);
 		return ret;
@@ -207,44 +208,6 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
 	return ret;
 }
 
-static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
-				     cgs_handle_t *handle)
-{
-	CGS_FUNC_ADEV;
-	int r;
-	uint32_t dma_handle;
-	struct drm_gem_object *obj;
-	struct amdgpu_bo *bo;
-	struct drm_device *dev = adev->ddev;
-	struct drm_file *file_priv = NULL, *priv;
-
-	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(priv, &dev->filelist, lhead) {
-		rcu_read_lock();
-		if (priv->pid == get_pid(task_pid(current)))
-			file_priv = priv;
-		rcu_read_unlock();
-		if (file_priv)
-			break;
-	}
-	mutex_unlock(&dev->struct_mutex);
-	r = dev->driver->prime_fd_to_handle(dev,
-					    file_priv, dmabuf_fd,
-					    &dma_handle);
-	spin_lock(&file_priv->table_lock);
-
-	/* Check if we currently have a reference on the object */
-	obj = idr_find(&file_priv->object_idr, dma_handle);
-	if (obj == NULL) {
-		spin_unlock(&file_priv->table_lock);
-		return -EINVAL;
-	}
-	spin_unlock(&file_priv->table_lock);
-	bo = gem_to_amdgpu_bo(obj);
-	*handle = (cgs_handle_t)bo;
-	return 0;
-}
-
 static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
 {
 	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@@ -809,7 +772,6 @@ static const struct cgs_ops amdgpu_cgs_ops = {
 };
 
 static const struct cgs_os_ops amdgpu_cgs_os_ops = {
-	amdgpu_cgs_import_gpu_mem,
 	amdgpu_cgs_add_irq_source,
 	amdgpu_cgs_irq_get,
 	amdgpu_cgs_irq_put
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 3b355aeb62fd..fd16652aa277 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -154,42 +154,42 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
 	uint64_t *chunk_array_user;
-	uint64_t *chunk_array = NULL;
+	uint64_t *chunk_array;
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-	unsigned size, i;
-	int r = 0;
+	unsigned size;
+	int i;
+	int ret;
 
-	if (!cs->in.num_chunks)
-		goto out;
+	if (cs->in.num_chunks == 0)
+		return 0;
+
+	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+	if (!chunk_array)
+		return -ENOMEM;
 
 	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
 	if (!p->ctx) {
-		r = -EINVAL;
-		goto out;
+		ret = -EINVAL;
+		goto free_chunk;
 	}
+
 	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
 
 	/* get chunks */
 	INIT_LIST_HEAD(&p->validated);
-	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
-	if (chunk_array == NULL) {
-		r = -ENOMEM;
-		goto out;
-	}
-
-	chunk_array_user = (uint64_t __user *)(cs->in.chunks);
+	chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
 	if (copy_from_user(chunk_array, chunk_array_user,
 			   sizeof(uint64_t)*cs->in.num_chunks)) {
-		r = -EFAULT;
-		goto out;
+		ret = -EFAULT;
+		goto put_bo_list;
 	}
 
 	p->nchunks = cs->in.num_chunks;
 	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
 			    GFP_KERNEL);
-	if (p->chunks == NULL) {
-		r = -ENOMEM;
-		goto out;
+	if (!p->chunks) {
+		ret = -ENOMEM;
+		goto put_bo_list;
 	}
 
 	for (i = 0; i < p->nchunks; i++) {
@@ -197,28 +197,30 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		struct drm_amdgpu_cs_chunk user_chunk;
 		uint32_t __user *cdata;
 
-		chunk_ptr = (void __user *)chunk_array[i];
+		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
 		if (copy_from_user(&user_chunk, chunk_ptr,
 				       sizeof(struct drm_amdgpu_cs_chunk))) {
-			r = -EFAULT;
-			goto out;
+			ret = -EFAULT;
+			i--;
+			goto free_partial_kdata;
 		}
 		p->chunks[i].chunk_id = user_chunk.chunk_id;
 		p->chunks[i].length_dw = user_chunk.length_dw;
 
 		size = p->chunks[i].length_dw;
-		cdata = (void __user *)user_chunk.chunk_data;
+		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
 		p->chunks[i].user_ptr = cdata;
 
 		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
 		if (p->chunks[i].kdata == NULL) {
-			r = -ENOMEM;
-			goto out;
+			ret = -ENOMEM;
+			i--;
+			goto free_partial_kdata;
 		}
 		size *= sizeof(uint32_t);
 		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
-			r = -EFAULT;
-			goto out;
+			ret = -EFAULT;
+			goto free_partial_kdata;
 		}
 
 		switch (p->chunks[i].chunk_id) {
@@ -238,15 +240,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 			gobj = drm_gem_object_lookup(p->adev->ddev,
 						     p->filp, handle);
 			if (gobj == NULL) {
-				r = -EINVAL;
-				goto out;
+				ret = -EINVAL;
+				goto free_partial_kdata;
 			}
 
 			p->uf.bo = gem_to_amdgpu_bo(gobj);
 			p->uf.offset = fence_data->offset;
 		} else {
-			r = -EINVAL;
-			goto out;
+			ret = -EINVAL;
+			goto free_partial_kdata;
 		}
 		break;
 
@@ -254,19 +256,35 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 			break;
 
 		default:
-			r = -EINVAL;
-			goto out;
+			ret = -EINVAL;
+			goto free_partial_kdata;
 		}
 	}
 
 
 	p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
-	if (!p->ibs)
-		r = -ENOMEM;
+	if (!p->ibs) {
+		ret = -ENOMEM;
+		goto free_all_kdata;
+	}
 
-out:
 	kfree(chunk_array);
-	return r;
+	return 0;
+
+free_all_kdata:
+	i = p->nchunks - 1;
+free_partial_kdata:
+	for (; i >= 0; i--)
+		drm_free_large(p->chunks[i].kdata);
+	kfree(p->chunks);
+put_bo_list:
+	if (p->bo_list)
+		amdgpu_bo_list_put(p->bo_list);
+	amdgpu_ctx_put(p->ctx);
+free_chunk:
+	kfree(chunk_array);
+
+	return ret;
 }
 
 /* Returns how many bytes TTM can move per IB.
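Note on the unwind ladder introduced above: the labels run in reverse order of the allocations, and the i-- before goto free_partial_kdata excludes the chunk slot whose kdata was never successfully allocated. A reduced, hypothetical sketch of the same pattern (names illustrative, not part of the patch):

	static int example_alloc_chunks(void **kdata, int nchunks, size_t size)
	{
		int i, ret;

		for (i = 0; i < nchunks; i++) {
			kdata[i] = kmalloc(size, GFP_KERNEL);
			if (!kdata[i]) {
				ret = -ENOMEM;
				i--;	/* slot i holds no allocation yet */
				goto free_partial;
			}
		}
		return 0;

	free_partial:
		for (; i >= 0; i--)	/* free only the completed slots */
			kfree(kdata[i]);
		return ret;
	}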
@@ -321,25 +339,17 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
 	return max(bytes_moved_threshold, 1024*1024ull);
 }
 
-int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
+int amdgpu_cs_list_validate(struct amdgpu_device *adev,
+			    struct amdgpu_vm *vm,
+			    struct list_head *validated)
 {
-	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-	struct amdgpu_vm *vm = &fpriv->vm;
-	struct amdgpu_device *adev = p->adev;
 	struct amdgpu_bo_list_entry *lobj;
-	struct list_head duplicates;
 	struct amdgpu_bo *bo;
 	u64 bytes_moved = 0, initial_bytes_moved;
 	u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
 	int r;
 
-	INIT_LIST_HEAD(&duplicates);
-	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
-	if (unlikely(r != 0)) {
-		return r;
-	}
-
-	list_for_each_entry(lobj, &p->validated, tv.head) {
+	list_for_each_entry(lobj, validated, tv.head) {
 		bo = lobj->robj;
 		if (!bo->pin_count) {
 			u32 domain = lobj->prefered_domains;
@@ -373,7 +383,6 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
 				domain = lobj->allowed_domains;
 				goto retry;
 			}
-			ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 			return r;
 		}
 	}
@@ -386,6 +395,7 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
 	struct amdgpu_cs_buckets buckets;
+	struct list_head duplicates;
 	bool need_mmap_lock = false;
 	int i, r;
 
@@ -405,8 +415,22 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
 	if (need_mmap_lock)
 		down_read(&current->mm->mmap_sem);
 
-	r = amdgpu_cs_list_validate(p);
+	INIT_LIST_HEAD(&duplicates);
+	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
+	if (unlikely(r != 0))
+		goto error_reserve;
+
+	r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
+	if (r)
+		goto error_validate;
+
+	r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates);
+
+error_validate:
+	if (r)
+		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
 
+error_reserve:
 	if (need_mmap_lock)
 		up_read(&current->mm->mmap_sem);
 
@@ -772,15 +796,15 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int amdgpu_cs_free_job(struct amdgpu_job *sched_job)
+static int amdgpu_cs_free_job(struct amdgpu_job *job)
 {
 	int i;
-	if (sched_job->ibs)
-		for (i = 0; i < sched_job->num_ibs; i++)
-			amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
-	kfree(sched_job->ibs);
-	if (sched_job->uf.bo)
-		drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base);
+	if (job->ibs)
+		for (i = 0; i < job->num_ibs; i++)
+			amdgpu_ib_free(job->adev, &job->ibs[i]);
+	kfree(job->ibs);
+	if (job->uf.bo)
+		drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
 	return 0;
 }
 
@@ -804,7 +828,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	r = amdgpu_cs_parser_init(parser, data);
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
-		amdgpu_cs_parser_fini(parser, r, false);
+		kfree(parser);
 		up_read(&adev->exclusive_lock);
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;
@@ -842,7 +866,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
 		if (!job)
 			return -ENOMEM;
-		job->base.sched = ring->scheduler;
+		job->base.sched = &ring->sched;
 		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
 		job->adev = parser->adev;
 		job->ibs = parser->ibs;
@@ -857,7 +881,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 		job->free_job = amdgpu_cs_free_job;
 		mutex_lock(&job->job_lock);
-		r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+		r = amd_sched_entity_push_job(&job->base);
 		if (r) {
 			mutex_unlock(&job->job_lock);
 			amdgpu_cs_free_job(job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 20cbc4eb5a6f..e0b80ccdfe8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,10 +43,10 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
 	for (i = 0; i < adev->num_rings; i++) {
 		struct amd_sched_rq *rq;
 		if (kernel)
-			rq = &adev->rings[i]->scheduler->kernel_rq;
+			rq = &adev->rings[i]->sched.kernel_rq;
 		else
-			rq = &adev->rings[i]->scheduler->sched_rq;
-		r = amd_sched_entity_init(adev->rings[i]->scheduler,
+			rq = &adev->rings[i]->sched.sched_rq;
+		r = amd_sched_entity_init(&adev->rings[i]->sched,
 					  &ctx->rings[i].entity,
 					  rq, amdgpu_sched_jobs);
 		if (r)
@@ -55,7 +55,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
 
 	if (i < adev->num_rings) {
 		for (j = 0; j < i; j++)
-			amd_sched_entity_fini(adev->rings[j]->scheduler,
+			amd_sched_entity_fini(&adev->rings[j]->sched,
 					      &ctx->rings[j].entity);
 		kfree(ctx);
 		return r;
@@ -75,7 +75,7 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 
 	if (amdgpu_enable_scheduler) {
 		for (i = 0; i < adev->num_rings; i++)
-			amd_sched_entity_fini(adev->rings[i]->scheduler,
+			amd_sched_entity_fini(&adev->rings[i]->sched,
 					      &ctx->rings[i].entity);
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6ff6ae945794..6068d8207d10 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -246,7 +246,7 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
 		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
 				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
 				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-				     NULL, &adev->vram_scratch.robj);
+				     NULL, NULL, &adev->vram_scratch.robj);
 		if (r) {
 			return r;
 		}
@@ -449,7 +449,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
 
 	if (adev->wb.wb_obj == NULL) {
 		r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, &adev->wb.wb_obj);
+				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
+				     &adev->wb.wb_obj);
 		if (r) {
 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
 			return r;
@@ -1650,9 +1651,11 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 	drm_kms_helper_poll_disable(dev);
 
 	/* turn off display hw */
+	drm_modeset_lock_all(dev);
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 	}
+	drm_modeset_unlock_all(dev);
 
 	/* unpin the front buffers */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1747,9 +1750,11 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	if (fbcon) {
 		drm_helper_resume_force_mode(dev);
 		/* turn on display hw */
+		drm_modeset_lock_all(dev);
 		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 		}
+		drm_modeset_unlock_all(dev);
 	}
 
 	drm_kms_helper_poll_enable(dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index e3d70772b531..dc29ed8145c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -85,8 +85,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	/* We borrow the event spin lock for protecting flip_status */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
-	/* set the proper interrupt */
-	amdgpu_irq_get(adev, &adev->pageflip_irq, work->crtc_id);
 	/* do the flip (mmio) */
 	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
 	/* set the flip status */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0fcc0bd1622c..b190c2a83680 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -79,6 +79,7 @@ int amdgpu_exp_hw_support = 0;
 int amdgpu_enable_scheduler = 0;
 int amdgpu_sched_jobs = 16;
 int amdgpu_sched_hw_submission = 2;
+int amdgpu_enable_semaphores = 1;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -152,6 +153,9 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
 MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
 module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
 
+MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable (default), 0 = disable)");
+module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
+
 static struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	/* Kaveri */
@@ -238,11 +242,11 @@ static struct pci_device_id pciidlist[] = {
 	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 #endif
 	/* topaz */
-	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
-	{0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+	{0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
 	/* tonga */
 	{0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
 	{0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 8a122b1b7786..96290d9cddca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -402,3 +402,19 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
 		return true;
 	return false;
 }
+
+void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
+{
+	struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev;
+	struct drm_fb_helper *fb_helper;
+	int ret;
+
+	if (!afbdev)
+		return;
+
+	fb_helper = &afbdev->helper;
+
+	ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+	if (ret)
+		DRM_DEBUG("failed to restore crtc mode\n");
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 1be2bd6d07ea..b3fc26c59787 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -609,9 +609,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
  * Init the fence driver for the requested ring (all asics).
  * Helper function for amdgpu_fence_driver_init().
  */
-void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 {
-	int i;
+	int i, r;
 
 	ring->fence_drv.cpu_addr = NULL;
 	ring->fence_drv.gpu_addr = 0;
@@ -625,15 +625,19 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 			    amdgpu_fence_check_lockup);
 	ring->fence_drv.ring = ring;
 
+	init_waitqueue_head(&ring->fence_drv.fence_queue);
+
 	if (amdgpu_enable_scheduler) {
-		ring->scheduler = amd_sched_create(&amdgpu_sched_ops,
-						   ring->idx,
-						   amdgpu_sched_hw_submission,
-						   (void *)ring->adev);
-		if (!ring->scheduler)
-			DRM_ERROR("Failed to create scheduler on ring %d.\n",
-				  ring->idx);
+		r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
+				   amdgpu_sched_hw_submission, ring->name);
+		if (r) {
+			DRM_ERROR("Failed to create scheduler on ring %s.\n",
+				  ring->name);
+			return r;
+		}
 	}
+
+	return 0;
 }
 
 /**
@@ -681,8 +685,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		wake_up_all(&ring->fence_drv.fence_queue);
 		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 			       ring->fence_drv.irq_type);
-		if (ring->scheduler)
-			amd_sched_destroy(ring->scheduler);
+		amd_sched_fini(&ring->sched);
 		ring->fence_drv.initialized = false;
 	}
 	mutex_unlock(&adev->ring_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index cbd3a486c5c2..7312d729d300 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -127,7 +127,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
 	r = amdgpu_bo_create(adev, adev->gart.table_size,
 			     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			     NULL, &adev->gart.robj);
+			     NULL, NULL, &adev->gart.robj);
 	if (r) {
 		return r;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 5839fab374bf..7297ca3a0ba7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -69,7 +69,8 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 		}
 	}
 retry:
-	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, &robj);
+	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
+			     flags, NULL, NULL, &robj);
 	if (r) {
 		if (r != -ERESTARTSYS) {
 			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -426,6 +427,10 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
 					   &args->data.data_size_bytes,
 					   &args->data.flags);
 	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
+		if (args->data.data_size_bytes > sizeof(args->data.data)) {
+			r = -EINVAL;
+			goto unreserve;
+		}
 		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
 		if (!r)
 			r = amdgpu_bo_set_metadata(robj, args->data.data,
@@ -433,6 +438,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
 					   args->data.flags);
 	}
 
+unreserve:
 	amdgpu_bo_unreserve(robj);
 out:
 	drm_gem_object_unreference_unlocked(gobj);
@@ -454,11 +460,12 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 	struct ttm_validate_buffer tv, *entry;
 	struct amdgpu_bo_list_entry *vm_bos;
 	struct ww_acquire_ctx ticket;
-	struct list_head list;
+	struct list_head list, duplicates;
 	unsigned domain;
 	int r;
 
 	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&duplicates);
 
 	tv.bo = &bo_va->bo->tbo;
 	tv.shared = true;
@@ -468,7 +475,8 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 	if (!vm_bos)
 		return;
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+	/* Provide duplicates to avoid -EALREADY */
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r)
 		goto error_free;
 
@@ -651,7 +659,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 	int r;
 
 	args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
-	args->size = args->pitch * args->height;
+	args->size = (u64)args->pitch * args->height;
 	args->size = ALIGN(args->size, PAGE_SIZE);
 
 	r = amdgpu_gem_object_create(adev, args->size, 0,
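Note: args->pitch and args->height are 32-bit fields while args->size is 64-bit, so the old args->pitch * args->height multiplication was performed in 32 bits and could wrap before the assignment (for example, a 65536-byte pitch times 65536 lines is 4 GiB, which truncates to 0 in a u32); the (u64) cast promotes the multiplication itself to 64 bits.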
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 5c8a803acedc..534fc04e80fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -43,7 +43,7 @@ static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
 	r = amdgpu_bo_create(adev, adev->irq.ih.ring_size,
 			     PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_GTT, 0,
-			     NULL, &adev->irq.ih.ring_obj);
+			     NULL, NULL, &adev->irq.ih.ring_obj);
 	if (r) {
 		DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 0aba8e9bc8a0..7c42ff670080 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -140,7 +140,7 @@ void amdgpu_irq_preinstall(struct drm_device *dev)
  */
 int amdgpu_irq_postinstall(struct drm_device *dev)
 {
-	dev->max_vblank_count = 0x001fffff;
+	dev->max_vblank_count = 0x00ffffff;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 22367939ebf1..5d11e798230c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -390,7 +390,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 			min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
 	}
 	case AMDGPU_INFO_READ_MMR_REG: {
-		unsigned n, alloc_size = info->read_mmr_reg.count * 4;
+		unsigned n, alloc_size;
 		uint32_t *regs;
 		unsigned se_num = (info->read_mmr_reg.instance >>
 				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
@@ -406,9 +406,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
 			sh_num = 0xffffffff;
 
-		regs = kmalloc(alloc_size, GFP_KERNEL);
+		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
 		if (!regs)
 			return -ENOMEM;
+		alloc_size = info->read_mmr_reg.count * sizeof(*regs);
 
 		for (i = 0; i < info->read_mmr_reg.count; i++)
 			if (amdgpu_asic_read_register(adev, se_num, sh_num,
@@ -484,7 +485,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  * Outdated mess for old drm with Xorg being in charge (void function now).
  */
 /**
- * amdgpu_driver_firstopen_kms - drm callback for last close
+ * amdgpu_driver_lastclose_kms - drm callback for last close
  *
  * @dev: drm dev pointer
  *
@@ -492,6 +493,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
  */
 void amdgpu_driver_lastclose_kms(struct drm_device *dev)
 {
+	struct amdgpu_device *adev = dev->dev_private;
+
+	amdgpu_fbdev_restore_mode(adev);
 	vga_switcheroo_process_delayed_switch();
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 64efe5b52e65..7bd470d9ac30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -567,6 +567,7 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev);
 void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state);
 int amdgpu_fbdev_total_size(struct amdgpu_device *adev);
 bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj);
+void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev);
 
 void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 08b09d55b96f..1a7708f365f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -215,6 +215,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 				bool kernel, u32 domain, u64 flags,
 				struct sg_table *sg,
 				struct ttm_placement *placement,
+				struct reservation_object *resv,
 				struct amdgpu_bo **bo_ptr)
 {
 	struct amdgpu_bo *bo;
@@ -261,7 +262,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 	/* Kernel allocation are uninterruptible */
 	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
 			&bo->placement, page_align, !kernel, NULL,
-			acc_size, sg, NULL, &amdgpu_ttm_bo_destroy);
+			acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
 	if (unlikely(r != 0)) {
 		return r;
 	}
@@ -275,7 +276,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 int amdgpu_bo_create(struct amdgpu_device *adev,
 		     unsigned long size, int byte_align,
 		     bool kernel, u32 domain, u64 flags,
-		     struct sg_table *sg, struct amdgpu_bo **bo_ptr)
+		     struct sg_table *sg,
+		     struct reservation_object *resv,
+		     struct amdgpu_bo **bo_ptr)
 {
 	struct ttm_placement placement = {0};
 	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
@@ -286,11 +289,9 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 	amdgpu_ttm_placement_init(adev, &placement,
 				  placements, domain, flags);
 
-	return amdgpu_bo_create_restricted(adev, size, byte_align,
-					   kernel, domain, flags,
-					   sg,
-					   &placement,
-					   bo_ptr);
+	return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
+					   domain, flags, sg, &placement,
+					   resv, bo_ptr);
 }
 
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
@@ -535,12 +536,10 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
 	if (metadata == NULL)
 		return -EINVAL;
 
-	buffer = kzalloc(metadata_size, GFP_KERNEL);
+	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
 	if (buffer == NULL)
 		return -ENOMEM;
 
-	memcpy(buffer, metadata, metadata_size);
-
 	kfree(bo->metadata);
 	bo->metadata_flags = flags;
 	bo->metadata = buffer;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 6ea18dcec561..3c2ff4567798 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -129,12 +129,14 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 		     unsigned long size, int byte_align,
 		     bool kernel, u32 domain, u64 flags,
 		     struct sg_table *sg,
+		     struct reservation_object *resv,
 		     struct amdgpu_bo **bo_ptr);
 int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 			    unsigned long size, int byte_align,
 			    bool kernel, u32 domain, u64 flags,
 			    struct sg_table *sg,
 			    struct ttm_placement *placement,
+			    struct reservation_object *resv,
 			    struct amdgpu_bo **bo_ptr);
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
 void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index d9652fe32d6a..59f735a933a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -61,12 +61,15 @@ struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 					struct dma_buf_attachment *attach,
 					struct sg_table *sg)
 {
+	struct reservation_object *resv = attach->dmabuf->resv;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_bo *bo;
 	int ret;
 
+	ww_mutex_lock(&resv->lock, NULL);
 	ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
-			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
+			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
+	ww_mutex_unlock(&resv->lock);
 	if (ret)
 		return ERR_PTR(ret);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 9bec91484c24..30dce235ddeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -357,11 +357,11 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 		ring->adev = adev;
 		ring->idx = adev->num_rings++;
 		adev->rings[ring->idx] = ring;
-		amdgpu_fence_driver_init_ring(ring);
+		r = amdgpu_fence_driver_init_ring(ring);
+		if (r)
+			return r;
 	}
 
-	init_waitqueue_head(&ring->fence_drv.fence_queue);
-
 	r = amdgpu_wb_get(adev, &ring->rptr_offs);
 	if (r) {
 		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
@@ -407,7 +407,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 	if (ring->ring_obj == NULL) {
 		r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_GTT, 0,
				     NULL, NULL, &ring->ring_obj);
 		if (r) {
 			dev_err(adev->dev, "(%d) ring create failed\n", r);
 			return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 74dad270362c..e90712443fe9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -64,8 +64,8 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
 		INIT_LIST_HEAD(&sa_manager->flist[i]);
 	}
 
-	r = amdgpu_bo_create(adev, size, align, true,
-			     domain, 0, NULL, &sa_manager->bo);
+	r = amdgpu_bo_create(adev, size, align, true, domain,
+			     0, NULL, NULL, &sa_manager->bo);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
 		return r;
@@ -145,8 +145,13 @@ static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
 	struct amd_sched_fence *s_fence;
 
 	s_fence = to_amd_sched_fence(f);
-	if (s_fence)
-		return s_fence->scheduler->ring_id;
+	if (s_fence) {
+		struct amdgpu_ring *ring;
+
+		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
+		return ring->idx;
+	}
+
 	a_fence = to_amdgpu_fence(f);
 	if (a_fence)
 		return a_fence->ring->idx;
@@ -412,6 +417,26 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
 }
 
 #if defined(CONFIG_DEBUG_FS)
+
+static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
+{
+	struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
+	struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);
+
+	if (a_fence)
+		seq_printf(m, " protected by 0x%016llx on ring %d",
+			   a_fence->seq, a_fence->ring->idx);
+
+	if (s_fence) {
+		struct amdgpu_ring *ring;
+
+		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
+		seq_printf(m, " protected by 0x%016x on ring %d",
+			   s_fence->base.seqno, ring->idx);
+	}
+}
+
 void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 				  struct seq_file *m)
 {
@@ -428,18 +453,8 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 		}
 		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
 			   soffset, eoffset, eoffset - soffset);
-		if (i->fence) {
-			struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence);
-			struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);
-			if (a_fence)
-				seq_printf(m, " protected by 0x%016llx on ring %d",
-					   a_fence->seq, a_fence->ring->idx);
-			if (s_fence)
-				seq_printf(m, " protected by 0x%016x on ring %d",
-					   s_fence->base.seqno,
-					   s_fence->scheduler->ring_id);
-
-		}
+		if (i->fence)
+			amdgpu_sa_bo_dump_fence(i->fence, m);
 		seq_printf(m, "\n");
 	}
 	spin_unlock(&sa_manager->wq.lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index de98fbd2971e..2e946b2cad88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -27,63 +27,48 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job)
+static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
 {
-	struct amdgpu_job *sched_job = (struct amdgpu_job *)job;
-	return amdgpu_sync_get_fence(&sched_job->ibs->sync);
+	struct amdgpu_job *job = to_amdgpu_job(sched_job);
+	return amdgpu_sync_get_fence(&job->ibs->sync);
 }
 
-static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
+static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
 {
-	struct amdgpu_job *sched_job;
-	struct amdgpu_fence *fence;
+	struct amdgpu_fence *fence = NULL;
+	struct amdgpu_job *job;
 	int r;
 
-	if (!job) {
+	if (!sched_job) {
 		DRM_ERROR("job is null\n");
 		return NULL;
 	}
-	sched_job = (struct amdgpu_job *)job;
-	mutex_lock(&sched_job->job_lock);
-	r = amdgpu_ib_schedule(sched_job->adev,
-			       sched_job->num_ibs,
-			       sched_job->ibs,
-			       sched_job->base.owner);
-	if (r)
+	job = to_amdgpu_job(sched_job);
+	mutex_lock(&job->job_lock);
+	r = amdgpu_ib_schedule(job->adev,
+			       job->num_ibs,
+			       job->ibs,
+			       job->base.owner);
+	if (r) {
+		DRM_ERROR("Error scheduling IBs (%d)\n", r);
 		goto err;
-	fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
-
-	if (sched_job->free_job)
-		sched_job->free_job(sched_job);
+	}
 
-	mutex_unlock(&sched_job->job_lock);
-	return &fence->base;
+	fence = amdgpu_fence_ref(job->ibs[job->num_ibs - 1].fence);
 
 err:
-	DRM_ERROR("Run job error\n");
-	mutex_unlock(&sched_job->job_lock);
-	job->sched->ops->process_job(job);
-	return NULL;
-}
-
-static void amdgpu_sched_process_job(struct amd_sched_job *job)
-{
-	struct amdgpu_job *sched_job;
-
-	if (!job) {
-		DRM_ERROR("job is null\n");
-		return;
-	}
-	sched_job = (struct amdgpu_job *)job;
-	/* after processing job, free memory */
-	fence_put(&sched_job->base.s_fence->base);
-	kfree(sched_job);
+	if (job->free_job)
+		job->free_job(job);
+
+	mutex_unlock(&job->job_lock);
+	fence_put(&job->base.s_fence->base);
+	kfree(job);
+	return fence ? &fence->base : NULL;
 }
 
 struct amd_sched_backend_ops amdgpu_sched_ops = {
 	.dependency = amdgpu_sched_dependency,
 	.run_job = amdgpu_sched_run_job,
-	.process_job = amdgpu_sched_process_job
 };
 
 int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
@@ -100,7 +85,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
100 kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); 85 kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
101 if (!job) 86 if (!job)
102 return -ENOMEM; 87 return -ENOMEM;
103 job->base.sched = ring->scheduler; 88 job->base.sched = &ring->sched;
104 job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; 89 job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
105 job->adev = adev; 90 job->adev = adev;
106 job->ibs = ibs; 91 job->ibs = ibs;
@@ -109,7 +94,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
109 mutex_init(&job->job_lock); 94 mutex_init(&job->job_lock);
110 job->free_job = free_job; 95 job->free_job = free_job;
111 mutex_lock(&job->job_lock); 96 mutex_lock(&job->job_lock);
112 r = amd_sched_entity_push_job((struct amd_sched_job *)job); 97 r = amd_sched_entity_push_job(&job->base);
113 if (r) { 98 if (r) {
114 mutex_unlock(&job->job_lock); 99 mutex_unlock(&job->job_lock);
115 kfree(job); 100 kfree(job);
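
Both scheduler callbacks above now recover the driver job with to_amdgpu_job() instead of a bare pointer cast, and with process_job gone, run_job owns the whole cleanup path (free_job callback, scheduler fence put, kfree). The cast helper is presumably added to the driver headers, not shown in this hunk; the natural container_of() form, given that struct amdgpu_job embeds its amd_sched_job as the member "base":

    #define to_amdgpu_job(sched_job) \
            container_of((sched_job), struct amdgpu_job, base)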
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 068aeaff7183..4921de15b451 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -65,8 +65,14 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
65 65
66 if (a_fence) 66 if (a_fence)
67 return a_fence->ring->adev == adev; 67 return a_fence->ring->adev == adev;
68 if (s_fence) 68
69 return (struct amdgpu_device *)s_fence->scheduler->priv == adev; 69 if (s_fence) {
70 struct amdgpu_ring *ring;
71
72 ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
73 return ring->adev == adev;
74 }
75
70 return false; 76 return false;
71} 77}
72 78
@@ -251,6 +257,20 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
251 fence_put(e->fence); 257 fence_put(e->fence);
252 kfree(e); 258 kfree(e);
253 } 259 }
260
261 if (amdgpu_enable_semaphores)
262 return 0;
263
264 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
265 struct amdgpu_fence *fence = sync->sync_to[i];
266 if (!fence)
267 continue;
268
269 r = fence_wait(&fence->base, false);
270 if (r)
271 return r;
272 }
273
254 return 0; 274 return 0;
255} 275}
256 276
@@ -285,7 +305,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
285 return -EINVAL; 305 return -EINVAL;
286 } 306 }
287 307
288 if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) { 308 if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores ||
309 (count >= AMDGPU_NUM_SYNCS)) {
289 /* not enough room, wait manually */ 310 /* not enough room, wait manually */
290 r = fence_wait(&fence->base, false); 311 r = fence_wait(&fence->base, false);
291 if (r) 312 if (r)
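
amdgpu_enable_semaphores is the new module option this series introduces. When it is off, amdgpu_sync_wait() falls back to waiting on each ring's fence from the CPU, and amdgpu_sync_rings() stops emitting hardware semaphores. A sketch of the assumed wiring in amdgpu_drv.c (whose hunk is not shown here), following the driver's existing option style:

    int amdgpu_enable_semaphores = 0;

    MODULE_PARM_DESC(enable_semaphores,
                     "Enable semaphores (1 = enable, 0 = disable (default))");
    module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);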
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index f80b1a43be8a..4865615e9c06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -59,8 +59,9 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
59 goto out_cleanup; 59 goto out_cleanup;
60 } 60 }
61 61
62 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0, 62 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
63 NULL, &vram_obj); 63 AMDGPU_GEM_DOMAIN_VRAM, 0,
64 NULL, NULL, &vram_obj);
64 if (r) { 65 if (r) {
65 DRM_ERROR("Failed to create VRAM object\n"); 66 DRM_ERROR("Failed to create VRAM object\n");
66 goto out_cleanup; 67 goto out_cleanup;
@@ -80,7 +81,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
80 struct fence *fence = NULL; 81 struct fence *fence = NULL;
81 82
82 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, 83 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
83 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i); 84 AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
85 NULL, gtt_obj + i);
84 if (r) { 86 if (r) {
85 DRM_ERROR("Failed to create GTT object %d\n", i); 87 DRM_ERROR("Failed to create GTT object %d\n", i);
86 goto out_lclean; 88 goto out_lclean;
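
This is the first of many call sites in this series where amdgpu_bo_create() gains an extra NULL before the bo pointer. Judging from the amdgpu_vm.c hunk further down, which passes a reservation object in that slot, the updated prototype is presumably along these lines (the header hunk itself is not shown):

    int amdgpu_bo_create(struct amdgpu_device *adev,
                         unsigned long size, int byte_align,
                         bool kernel, u32 domain, u64 flags,
                         struct sg_table *sg,
                         struct reservation_object *resv,
                         struct amdgpu_bo **bo_ptr);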
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b5abd5cde413..364cbe975332 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -861,7 +861,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
861 r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true, 861 r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
862 AMDGPU_GEM_DOMAIN_VRAM, 862 AMDGPU_GEM_DOMAIN_VRAM,
863 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 863 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
864 NULL, &adev->stollen_vga_memory); 864 NULL, NULL, &adev->stollen_vga_memory);
865 if (r) { 865 if (r) {
866 return r; 866 return r;
867 } 867 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 482e66797ae6..5cc95f1a7dab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -247,7 +247,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
247 const struct common_firmware_header *header = NULL; 247 const struct common_firmware_header *header = NULL;
248 248
249 err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, 249 err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
250 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, bo); 250 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
251 if (err) { 251 if (err) {
252 dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); 252 dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
253 err = -ENOMEM; 253 err = -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2cf6c6b06e3b..d0312364d950 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -156,7 +156,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
156 r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true, 156 r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
157 AMDGPU_GEM_DOMAIN_VRAM, 157 AMDGPU_GEM_DOMAIN_VRAM,
158 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 158 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
159 NULL, &adev->uvd.vcpu_bo); 159 NULL, NULL, &adev->uvd.vcpu_bo);
160 if (r) { 160 if (r) {
161 dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); 161 dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
162 return r; 162 return r;
@@ -543,46 +543,60 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
543 return -EINVAL; 543 return -EINVAL;
544 } 544 }
545 545
546 if (msg_type == 1) { 546 switch (msg_type) {
547 case 0:
548 /* it's a create msg, calc image size (width * height) */
549 amdgpu_bo_kunmap(bo);
550
551 /* try to alloc a new handle */
552 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
553 if (atomic_read(&adev->uvd.handles[i]) == handle) {
554 DRM_ERROR("Handle 0x%x already in use!\n", handle);
555 return -EINVAL;
556 }
557
558 if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
559 adev->uvd.filp[i] = ctx->parser->filp;
560 return 0;
561 }
562 }
563
564 DRM_ERROR("No more free UVD handles!\n");
565 return -EINVAL;
566
567 case 1:
547 /* it's a decode msg, calc buffer sizes */ 568 /* it's a decode msg, calc buffer sizes */
548 r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes); 569 r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
549 amdgpu_bo_kunmap(bo); 570 amdgpu_bo_kunmap(bo);
550 if (r) 571 if (r)
551 return r; 572 return r;
552 573
553 } else if (msg_type == 2) { 574 /* validate the handle */
575 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
576 if (atomic_read(&adev->uvd.handles[i]) == handle) {
577 if (adev->uvd.filp[i] != ctx->parser->filp) {
578 DRM_ERROR("UVD handle collision detected!\n");
579 return -EINVAL;
580 }
581 return 0;
582 }
583 }
584
585 DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
586 return -ENOENT;
587
588 case 2:
554 /* it's a destroy msg, free the handle */ 589 /* it's a destroy msg, free the handle */
555 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) 590 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
556 atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); 591 atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
557 amdgpu_bo_kunmap(bo); 592 amdgpu_bo_kunmap(bo);
558 return 0; 593 return 0;
559 } else {
560 /* it's a create msg */
561 amdgpu_bo_kunmap(bo);
562
563 if (msg_type != 0) {
564 DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
565 return -EINVAL;
566 }
567
568 /* it's a create msg, no special handling needed */
569 }
570
571 /* create or decode, validate the handle */
572 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
573 if (atomic_read(&adev->uvd.handles[i]) == handle)
574 return 0;
575 }
576 594
577 /* handle not found try to alloc a new one */ 595 default:
578 for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { 596 DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
579 if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { 597 return -EINVAL;
580 adev->uvd.filp[i] = ctx->parser->filp;
581 return 0;
582 }
583 } 598 }
584 599 BUG();
585 DRM_ERROR("No more free UVD handles!\n");
586 return -EINVAL; 600 return -EINVAL;
587} 601}
588 602
@@ -805,10 +819,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
805} 819}
806 820
807static int amdgpu_uvd_free_job( 821static int amdgpu_uvd_free_job(
808 struct amdgpu_job *sched_job) 822 struct amdgpu_job *job)
809{ 823{
810 amdgpu_ib_free(sched_job->adev, sched_job->ibs); 824 amdgpu_ib_free(job->adev, job->ibs);
811 kfree(sched_job->ibs); 825 kfree(job->ibs);
812 return 0; 826 return 0;
813} 827}
814 828
@@ -905,7 +919,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
905 r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, 919 r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
906 AMDGPU_GEM_DOMAIN_VRAM, 920 AMDGPU_GEM_DOMAIN_VRAM,
907 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 921 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
908 NULL, &bo); 922 NULL, NULL, &bo);
909 if (r) 923 if (r)
910 return r; 924 return r;
911 925
@@ -954,7 +968,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
954 r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true, 968 r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
955 AMDGPU_GEM_DOMAIN_VRAM, 969 AMDGPU_GEM_DOMAIN_VRAM,
956 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 970 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
957 NULL, &bo); 971 NULL, NULL, &bo);
958 if (r) 972 if (r)
959 return r; 973 return r;
960 974
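
The rewritten amdgpu_uvd_cs_msg() gives each message type a self-contained case: create (0) claims a free handle slot, decode (1) validates that the handle exists and belongs to the calling file, and destroy (2) releases it. The slot claim hinges on atomic_cmpxchg() storing only when the slot still reads 0, so two processes racing for the same free slot cannot both win it; condensed from the case 0 path above:

    /* atomic_cmpxchg() returns the old value, so exactly one racing
     * writer observes 0 and wins the slot */
    if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
            adev->uvd.filp[i] = ctx->parser->filp;  /* record the owner */
            return 0;
    }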
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 3cab96c42aa8..74f2038ac747 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -143,7 +143,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
143 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, 143 r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
144 AMDGPU_GEM_DOMAIN_VRAM, 144 AMDGPU_GEM_DOMAIN_VRAM,
145 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 145 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
146 NULL, &adev->vce.vcpu_bo); 146 NULL, NULL, &adev->vce.vcpu_bo);
147 if (r) { 147 if (r) {
148 dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r); 148 dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
149 return r; 149 return r;
@@ -342,10 +342,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
342} 342}
343 343
344static int amdgpu_vce_free_job( 344static int amdgpu_vce_free_job(
345 struct amdgpu_job *sched_job) 345 struct amdgpu_job *job)
346{ 346{
347 amdgpu_ib_free(sched_job->adev, sched_job->ibs); 347 amdgpu_ib_free(job->adev, job->ibs);
348 kfree(sched_job->ibs); 348 kfree(job->ibs);
349 return 0; 349 return 0;
350} 350}
351 351
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f68b7cdc370a..53d551f2d839 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -316,12 +316,12 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
316 } 316 }
317} 317}
318 318
319int amdgpu_vm_free_job(struct amdgpu_job *sched_job) 319int amdgpu_vm_free_job(struct amdgpu_job *job)
320{ 320{
321 int i; 321 int i;
322 for (i = 0; i < sched_job->num_ibs; i++) 322 for (i = 0; i < job->num_ibs; i++)
323 amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]); 323 amdgpu_ib_free(job->adev, &job->ibs[i]);
324 kfree(sched_job->ibs); 324 kfree(job->ibs);
325 return 0; 325 return 0;
326} 326}
327 327
@@ -455,8 +455,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
455 return -ENOMEM; 455 return -ENOMEM;
456 456
457 r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); 457 r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
458 if (r) 458 if (r) {
459 kfree(ib);
459 return r; 460 return r;
461 }
460 ib->length_dw = 0; 462 ib->length_dw = 0;
461 463
462 /* walk over the address space and update the page directory */ 464 /* walk over the address space and update the page directory */
@@ -686,31 +688,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
686} 688}
687 689
688/** 690/**
689 * amdgpu_vm_fence_pts - fence page tables after an update
690 *
691 * @vm: requested vm
692 * @start: start of GPU address range
693 * @end: end of GPU address range
694 * @fence: fence to use
695 *
696 * Fence the page tables in the range @start - @end (cayman+).
697 *
698 * Global and local mutex must be locked!
699 */
700static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
701 uint64_t start, uint64_t end,
702 struct fence *fence)
703{
704 unsigned i;
705
706 start >>= amdgpu_vm_block_size;
707 end >>= amdgpu_vm_block_size;
708
709 for (i = start; i <= end; ++i)
710 amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
711}
712
713/**
714 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table 691 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
715 * 692 *
716 * @adev: amdgpu_device pointer 693 * @adev: amdgpu_device pointer
@@ -813,8 +790,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
813 if (r) 790 if (r)
814 goto error_free; 791 goto error_free;
815 792
816 amdgpu_vm_fence_pts(vm, mapping->it.start, 793 amdgpu_bo_fence(vm->page_directory, f, true);
817 mapping->it.last + 1, f);
818 if (fence) { 794 if (fence) {
819 fence_put(*fence); 795 fence_put(*fence);
820 *fence = fence_get(f); 796 *fence = fence_get(f);
@@ -855,7 +831,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
855 int r; 831 int r;
856 832
857 if (mem) { 833 if (mem) {
858 addr = mem->start << PAGE_SHIFT; 834 addr = (u64)mem->start << PAGE_SHIFT;
859 if (mem->mem_type != TTM_PL_TT) 835 if (mem->mem_type != TTM_PL_TT)
860 addr += adev->vm_manager.vram_base_offset; 836 addr += adev->vm_manager.vram_base_offset;
861 } else { 837 } else {
@@ -1089,6 +1065,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1089 1065
1090 /* walk over the address space and allocate the page tables */ 1066 /* walk over the address space and allocate the page tables */
1091 for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { 1067 for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
1068 struct reservation_object *resv = vm->page_directory->tbo.resv;
1092 struct amdgpu_bo *pt; 1069 struct amdgpu_bo *pt;
1093 1070
1094 if (vm->page_tables[pt_idx].bo) 1071 if (vm->page_tables[pt_idx].bo)
@@ -1097,11 +1074,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
1097 /* drop mutex to allocate and clear page table */ 1074 /* drop mutex to allocate and clear page table */
1098 mutex_unlock(&vm->mutex); 1075 mutex_unlock(&vm->mutex);
1099 1076
1077 ww_mutex_lock(&resv->lock, NULL);
1100 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, 1078 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
1101 AMDGPU_GPU_PAGE_SIZE, true, 1079 AMDGPU_GPU_PAGE_SIZE, true,
1102 AMDGPU_GEM_DOMAIN_VRAM, 1080 AMDGPU_GEM_DOMAIN_VRAM,
1103 AMDGPU_GEM_CREATE_NO_CPU_ACCESS, 1081 AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1104 NULL, &pt); 1082 NULL, resv, &pt);
1083 ww_mutex_unlock(&resv->lock);
1105 if (r) 1084 if (r)
1106 goto error_free; 1085 goto error_free;
1107 1086
@@ -1303,7 +1282,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1303 r = amdgpu_bo_create(adev, pd_size, align, true, 1282 r = amdgpu_bo_create(adev, pd_size, align, true,
1304 AMDGPU_GEM_DOMAIN_VRAM, 1283 AMDGPU_GEM_DOMAIN_VRAM,
1305 AMDGPU_GEM_CREATE_NO_CPU_ACCESS, 1284 AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
1306 NULL, &vm->page_directory); 1285 NULL, NULL, &vm->page_directory);
1307 if (r) 1286 if (r)
1308 return r; 1287 return r;
1309 1288
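
Two related changes in amdgpu_vm.c: page tables are now created against the page directory's reservation object (taken with a NULL ww_acquire_ctx, which is safe since only this single lock is held, so there is nothing to back off from), and amdgpu_vm_fence_pts() disappears because one fence on the directory now covers every page table sharing that reservation object. The resulting pattern, condensed from the hunks above:

    struct reservation_object *resv = vm->page_directory->tbo.resv;

    ww_mutex_lock(&resv->lock, NULL);       /* single lock, no backoff needed */
    r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, AMDGPU_GPU_PAGE_SIZE,
                         true, AMDGPU_GEM_DOMAIN_VRAM,
                         AMDGPU_GEM_CREATE_NO_CPU_ACCESS, NULL, resv, &pt);
    ww_mutex_unlock(&resv->lock);

    /* one fence on the directory protects every bo sharing resv */
    amdgpu_bo_fence(vm->page_directory, f, true);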
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index cd6edc40c9cd..1e0bba29e167 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -1279,8 +1279,7 @@ amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action)
1279 amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); 1279 amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
1280 } 1280 }
1281 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 1281 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
1282 amdgpu_atombios_encoder_setup_dig_transmitter(encoder, 1282 amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level);
1283 ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
1284 if (ext_encoder) 1283 if (ext_encoder)
1285 amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE); 1284 amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE);
1286 } else { 1285 } else {
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 82e8d0730517..a1a35a5df8e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6185,6 +6185,11 @@ static int ci_dpm_late_init(void *handle)
6185 if (!amdgpu_dpm) 6185 if (!amdgpu_dpm)
6186 return 0; 6186 return 0;
6187 6187
6188 /* init the sysfs and debugfs files late */
6189 ret = amdgpu_pm_sysfs_init(adev);
6190 if (ret)
6191 return ret;
6192
6188 ret = ci_set_temperature_range(adev); 6193 ret = ci_set_temperature_range(adev);
6189 if (ret) 6194 if (ret)
6190 return ret; 6195 return ret;
@@ -6232,9 +6237,6 @@ static int ci_dpm_sw_init(void *handle)
6232 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; 6237 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
6233 if (amdgpu_dpm == 1) 6238 if (amdgpu_dpm == 1)
6234 amdgpu_pm_print_power_states(adev); 6239 amdgpu_pm_print_power_states(adev);
6235 ret = amdgpu_pm_sysfs_init(adev);
6236 if (ret)
6237 goto dpm_failed;
6238 mutex_unlock(&adev->pm.mutex); 6240 mutex_unlock(&adev->pm.mutex);
6239 DRM_INFO("amdgpu: dpm initialized\n"); 6241 DRM_INFO("amdgpu: dpm initialized\n");
6240 6242
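
The same reshuffle is applied to ci, cz and kv dpm in this series: amdgpu_pm_sysfs_init() moves out of sw_init and into the late_init hook, so the sysfs and debugfs files only appear once the rest of the IP block is up and never expose a half-initialized interface. The shared shape, with foo_ standing in for the asic prefix:

    static int foo_dpm_late_init(void *handle)
    {
            struct amdgpu_device *adev = (struct amdgpu_device *)handle;
            int ret;

            if (!amdgpu_dpm)
                    return 0;

            /* init the sysfs and debugfs files late */
            ret = amdgpu_pm_sysfs_init(adev);
            if (ret)
                    return ret;

            /* remaining late setup, e.g. temperature ranges or
             * powergating unused blocks */
            return 0;
    }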
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 4b6ce74753cd..484710cfdf82 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1567,6 +1567,9 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
1567 int ret, i; 1567 int ret, i;
1568 u16 tmp16; 1568 u16 tmp16;
1569 1569
1570 if (pci_is_root_bus(adev->pdev->bus))
1571 return;
1572
1570 if (amdgpu_pcie_gen2 == 0) 1573 if (amdgpu_pcie_gen2 == 0)
1571 return; 1574 return;
1572 1575
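
The new guard skips PCIe link training entirely when the GPU sits directly on the root bus: pci_is_root_bus() is true when the device's bus has no parent bridge, so there is no upstream link whose speed could be renegotiated (a topology typical of passthrough and some virtualized setups). Expressed as a hypothetical predicate:

    /* true only when an upstream bridge, and thus a trainable link, exists */
    static bool cik_has_upstream_link(struct amdgpu_device *adev)
    {
            return !pci_is_root_bus(adev->pdev->bus);
    }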
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 44fa96ad4709..2e3373ed4c94 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -596,6 +596,12 @@ static int cz_dpm_late_init(void *handle)
596 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 596 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
597 597
598 if (amdgpu_dpm) { 598 if (amdgpu_dpm) {
599 int ret;
600 /* init the sysfs and debugfs files late */
601 ret = amdgpu_pm_sysfs_init(adev);
602 if (ret)
603 return ret;
604
599 /* powerdown unused blocks for now */ 605 /* powerdown unused blocks for now */
600 cz_dpm_powergate_uvd(adev, true); 606 cz_dpm_powergate_uvd(adev, true);
601 cz_dpm_powergate_vce(adev, true); 607 cz_dpm_powergate_vce(adev, true);
@@ -632,10 +638,6 @@ static int cz_dpm_sw_init(void *handle)
632 if (amdgpu_dpm == 1) 638 if (amdgpu_dpm == 1)
633 amdgpu_pm_print_power_states(adev); 639 amdgpu_pm_print_power_states(adev);
634 640
635 ret = amdgpu_pm_sysfs_init(adev);
636 if (ret)
637 goto dpm_init_failed;
638
639 mutex_unlock(&adev->pm.mutex); 641 mutex_unlock(&adev->pm.mutex);
640 DRM_INFO("amdgpu: dpm initialized\n"); 642 DRM_INFO("amdgpu: dpm initialized\n");
641 643
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
index a72ffc7d6c26..e33180d3314a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
@@ -814,7 +814,8 @@ int cz_smu_init(struct amdgpu_device *adev)
814 * 3. map kernel virtual address 814 * 3. map kernel virtual address
815 */ 815 */
816 ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE, 816 ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
817 true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, toc_buf); 817 true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
818 toc_buf);
818 819
819 if (ret) { 820 if (ret) {
820 dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret); 821 dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
@@ -822,7 +823,8 @@ int cz_smu_init(struct amdgpu_device *adev)
822 } 823 }
823 824
824 ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE, 825 ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
825 true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, smu_buf); 826 true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
827 smu_buf);
826 828
827 if (ret) { 829 if (ret) {
828 dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret); 830 dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index e4d101b1252a..d4c82b625727 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -255,6 +255,24 @@ static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
255 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); 255 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
256} 256}
257 257
258static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev)
259{
260 unsigned i;
261
262 /* Enable pflip interrupts */
263 for (i = 0; i < adev->mode_info.num_crtc; i++)
264 amdgpu_irq_get(adev, &adev->pageflip_irq, i);
265}
266
267static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
268{
269 unsigned i;
270
271 /* Disable pflip interrupts */
272 for (i = 0; i < adev->mode_info.num_crtc; i++)
273 amdgpu_irq_put(adev, &adev->pageflip_irq, i);
274}
275
258/** 276/**
259 * dce_v10_0_page_flip - pageflip callback. 277 * dce_v10_0_page_flip - pageflip callback.
260 * 278 *
@@ -2663,9 +2681,10 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2663 dce_v10_0_vga_enable(crtc, true); 2681 dce_v10_0_vga_enable(crtc, true);
2664 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2682 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2665 dce_v10_0_vga_enable(crtc, false); 2683 dce_v10_0_vga_enable(crtc, false);
2666 /* Make sure VBLANK interrupt is still enabled */ 2684 /* Make sure VBLANK and PFLIP interrupts are still enabled */
2667 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2685 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2668 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2686 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2687 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2669 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); 2688 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
2670 dce_v10_0_crtc_load_lut(crtc); 2689 dce_v10_0_crtc_load_lut(crtc);
2671 break; 2690 break;
@@ -3025,6 +3044,8 @@ static int dce_v10_0_hw_init(void *handle)
3025 dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 3044 dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
3026 } 3045 }
3027 3046
3047 dce_v10_0_pageflip_interrupt_init(adev);
3048
3028 return 0; 3049 return 0;
3029} 3050}
3030 3051
@@ -3039,6 +3060,8 @@ static int dce_v10_0_hw_fini(void *handle)
3039 dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 3060 dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
3040 } 3061 }
3041 3062
3063 dce_v10_0_pageflip_interrupt_fini(adev);
3064
3042 return 0; 3065 return 0;
3043} 3066}
3044 3067
@@ -3050,6 +3073,8 @@ static int dce_v10_0_suspend(void *handle)
3050 3073
3051 dce_v10_0_hpd_fini(adev); 3074 dce_v10_0_hpd_fini(adev);
3052 3075
3076 dce_v10_0_pageflip_interrupt_fini(adev);
3077
3053 return 0; 3078 return 0;
3054} 3079}
3055 3080
@@ -3075,6 +3100,8 @@ static int dce_v10_0_resume(void *handle)
3075 /* initialize hpd */ 3100 /* initialize hpd */
3076 dce_v10_0_hpd_init(adev); 3101 dce_v10_0_hpd_init(adev);
3077 3102
3103 dce_v10_0_pageflip_interrupt_init(adev);
3104
3078 return 0; 3105 return 0;
3079} 3106}
3080 3107
@@ -3369,7 +3396,6 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
3369 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3396 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3370 3397
3371 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); 3398 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
3372 amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
3373 queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); 3399 queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
3374 3400
3375 return 0; 3401 return 0;
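
The dce_v10_0 changes (mirrored in dce_v11_0 and dce_v8_0 below) rework pageflip interrupt reference counting: instead of a get per flip paired with a put in the irq handler, where completion of one flip could disable the interrupt a concurrent flip still needed, each CRTC takes one reference at hw_init/resume and drops it at hw_fini/suspend. The two per-asic helpers could equally be written as one hypothetical combined form:

    static void dce_pageflip_irq_refs(struct amdgpu_device *adev, bool enable)
    {
            unsigned i;

            for (i = 0; i < adev->mode_info.num_crtc; i++) {
                    if (enable)
                            amdgpu_irq_get(adev, &adev->pageflip_irq, i);
                    else
                            amdgpu_irq_put(adev, &adev->pageflip_irq, i);
            }
    }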
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 6411e8244671..7e1cf5e4eebf 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -233,6 +233,24 @@ static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
233 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); 233 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
234} 234}
235 235
236static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
237{
238 unsigned i;
239
240 /* Enable pflip interrupts */
241 for (i = 0; i < adev->mode_info.num_crtc; i++)
242 amdgpu_irq_get(adev, &adev->pageflip_irq, i);
243}
244
245static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
246{
247 unsigned i;
248
249 /* Disable pflip interrupts */
250 for (i = 0; i < adev->mode_info.num_crtc; i++)
251 amdgpu_irq_put(adev, &adev->pageflip_irq, i);
252}
253
236/** 254/**
237 * dce_v11_0_page_flip - pageflip callback. 255 * dce_v11_0_page_flip - pageflip callback.
238 * 256 *
@@ -2640,9 +2658,10 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2640 dce_v11_0_vga_enable(crtc, true); 2658 dce_v11_0_vga_enable(crtc, true);
2641 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2659 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2642 dce_v11_0_vga_enable(crtc, false); 2660 dce_v11_0_vga_enable(crtc, false);
2643 /* Make sure VBLANK interrupt is still enabled */ 2661 /* Make sure VBLANK and PFLIP interrupts are still enabled */
2644 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2662 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2645 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2663 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2664 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2646 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); 2665 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
2647 dce_v11_0_crtc_load_lut(crtc); 2666 dce_v11_0_crtc_load_lut(crtc);
2648 break; 2667 break;
@@ -2888,7 +2907,7 @@ static int dce_v11_0_early_init(void *handle)
2888 2907
2889 switch (adev->asic_type) { 2908 switch (adev->asic_type) {
2890 case CHIP_CARRIZO: 2909 case CHIP_CARRIZO:
2891 adev->mode_info.num_crtc = 4; 2910 adev->mode_info.num_crtc = 3;
2892 adev->mode_info.num_hpd = 6; 2911 adev->mode_info.num_hpd = 6;
2893 adev->mode_info.num_dig = 9; 2912 adev->mode_info.num_dig = 9;
2894 break; 2913 break;
@@ -3000,6 +3019,8 @@ static int dce_v11_0_hw_init(void *handle)
3000 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 3019 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
3001 } 3020 }
3002 3021
3022 dce_v11_0_pageflip_interrupt_init(adev);
3023
3003 return 0; 3024 return 0;
3004} 3025}
3005 3026
@@ -3014,6 +3035,8 @@ static int dce_v11_0_hw_fini(void *handle)
3014 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 3035 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
3015 } 3036 }
3016 3037
3038 dce_v11_0_pageflip_interrupt_fini(adev);
3039
3017 return 0; 3040 return 0;
3018} 3041}
3019 3042
@@ -3025,6 +3048,8 @@ static int dce_v11_0_suspend(void *handle)
3025 3048
3026 dce_v11_0_hpd_fini(adev); 3049 dce_v11_0_hpd_fini(adev);
3027 3050
3051 dce_v11_0_pageflip_interrupt_fini(adev);
3052
3028 return 0; 3053 return 0;
3029} 3054}
3030 3055
@@ -3051,6 +3076,8 @@ static int dce_v11_0_resume(void *handle)
3051 /* initialize hpd */ 3076 /* initialize hpd */
3052 dce_v11_0_hpd_init(adev); 3077 dce_v11_0_hpd_init(adev);
3053 3078
3079 dce_v11_0_pageflip_interrupt_init(adev);
3080
3054 return 0; 3081 return 0;
3055} 3082}
3056 3083
@@ -3345,7 +3372,6 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
3345 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3372 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3346 3373
3347 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); 3374 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
3348 amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
3349 queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); 3375 queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
3350 3376
3351 return 0; 3377 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index c86911c2ea2a..34b9c2a9d8d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -204,6 +204,24 @@ static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
204 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); 204 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
205} 205}
206 206
207static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
208{
209 unsigned i;
210
211 /* Enable pflip interrupts */
212 for (i = 0; i < adev->mode_info.num_crtc; i++)
213 amdgpu_irq_get(adev, &adev->pageflip_irq, i);
214}
215
216static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
217{
218 unsigned i;
219
220 /* Disable pflip interrupts */
221 for (i = 0; i < adev->mode_info.num_crtc; i++)
222 amdgpu_irq_put(adev, &adev->pageflip_irq, i);
223}
224
207/** 225/**
208 * dce_v8_0_page_flip - pageflip callback. 226 * dce_v8_0_page_flip - pageflip callback.
209 * 227 *
@@ -2575,9 +2593,10 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2575 dce_v8_0_vga_enable(crtc, true); 2593 dce_v8_0_vga_enable(crtc, true);
2576 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2594 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2577 dce_v8_0_vga_enable(crtc, false); 2595 dce_v8_0_vga_enable(crtc, false);
2578 /* Make sure VBLANK interrupt is still enabled */ 2596 /* Make sure VBLANK and PFLIP interrupts are still enabled */
2579 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2597 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2580 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2598 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2599 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2581 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); 2600 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
2582 dce_v8_0_crtc_load_lut(crtc); 2601 dce_v8_0_crtc_load_lut(crtc);
2583 break; 2602 break;
@@ -2933,6 +2952,8 @@ static int dce_v8_0_hw_init(void *handle)
2933 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 2952 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2934 } 2953 }
2935 2954
2955 dce_v8_0_pageflip_interrupt_init(adev);
2956
2936 return 0; 2957 return 0;
2937} 2958}
2938 2959
@@ -2947,6 +2968,8 @@ static int dce_v8_0_hw_fini(void *handle)
2947 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 2968 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2948 } 2969 }
2949 2970
2971 dce_v8_0_pageflip_interrupt_fini(adev);
2972
2950 return 0; 2973 return 0;
2951} 2974}
2952 2975
@@ -2958,6 +2981,8 @@ static int dce_v8_0_suspend(void *handle)
2958 2981
2959 dce_v8_0_hpd_fini(adev); 2982 dce_v8_0_hpd_fini(adev);
2960 2983
2984 dce_v8_0_pageflip_interrupt_fini(adev);
2985
2961 return 0; 2986 return 0;
2962} 2987}
2963 2988
@@ -2981,6 +3006,8 @@ static int dce_v8_0_resume(void *handle)
2981 /* initialize hpd */ 3006 /* initialize hpd */
2982 dce_v8_0_hpd_init(adev); 3007 dce_v8_0_hpd_init(adev);
2983 3008
3009 dce_v8_0_pageflip_interrupt_init(adev);
3010
2984 return 0; 3011 return 0;
2985} 3012}
2986 3013
@@ -3376,7 +3403,6 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
3376 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3403 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3377 3404
3378 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); 3405 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
3379 amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
3380 queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); 3406 queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
3381 3407
3382 return 0; 3408 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
index 322edea65857..bda1249eb871 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
@@ -764,7 +764,7 @@ int fiji_smu_init(struct amdgpu_device *adev)
764 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, 764 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
765 true, AMDGPU_GEM_DOMAIN_VRAM, 765 true, AMDGPU_GEM_DOMAIN_VRAM,
766 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 766 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
767 NULL, toc_buf); 767 NULL, NULL, toc_buf);
768 if (ret) { 768 if (ret) {
769 DRM_ERROR("Failed to allocate memory for TOC buffer\n"); 769 DRM_ERROR("Failed to allocate memory for TOC buffer\n");
770 return -ENOMEM; 770 return -ENOMEM;
@@ -774,7 +774,7 @@ int fiji_smu_init(struct amdgpu_device *adev)
774 ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, 774 ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
775 true, AMDGPU_GEM_DOMAIN_VRAM, 775 true, AMDGPU_GEM_DOMAIN_VRAM,
776 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 776 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
777 NULL, smu_buf); 777 NULL, NULL, smu_buf);
778 if (ret) { 778 if (ret) {
779 DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); 779 DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
780 return -ENOMEM; 780 return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 4bd1e5cf65ca..e992bf2ff66c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3206,7 +3206,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
3206 r = amdgpu_bo_create(adev, 3206 r = amdgpu_bo_create(adev,
3207 adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2, 3207 adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
3208 PAGE_SIZE, true, 3208 PAGE_SIZE, true,
3209 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, 3209 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
3210 &adev->gfx.mec.hpd_eop_obj); 3210 &adev->gfx.mec.hpd_eop_obj);
3211 if (r) { 3211 if (r) {
3212 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); 3212 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
@@ -3373,7 +3373,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
3373 r = amdgpu_bo_create(adev, 3373 r = amdgpu_bo_create(adev,
3374 sizeof(struct bonaire_mqd), 3374 sizeof(struct bonaire_mqd),
3375 PAGE_SIZE, true, 3375 PAGE_SIZE, true,
3376 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, 3376 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
3377 &ring->mqd_obj); 3377 &ring->mqd_obj);
3378 if (r) { 3378 if (r) {
3379 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); 3379 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
@@ -3610,41 +3610,6 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
3610 return 0; 3610 return 0;
3611} 3611}
3612 3612
3613static void gfx_v7_0_ce_sync_me(struct amdgpu_ring *ring)
3614{
3615 struct amdgpu_device *adev = ring->adev;
3616 u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4;
3617
3618 /* instruct DE to set a magic number */
3619 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3620 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3621 WRITE_DATA_DST_SEL(5)));
3622 amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
3623 amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
3624 amdgpu_ring_write(ring, 1);
3625
3626 /* let CE wait till condition satisfied */
3627 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3628 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
3629 WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
3630 WAIT_REG_MEM_FUNCTION(3) | /* == */
3631 WAIT_REG_MEM_ENGINE(2))); /* ce */
3632 amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
3633 amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
3634 amdgpu_ring_write(ring, 1);
3635 amdgpu_ring_write(ring, 0xffffffff);
3636 amdgpu_ring_write(ring, 4); /* poll interval */
3637
3638 /* instruct CE to reset wb of ce_sync to zero */
3639 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3640 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
3641 WRITE_DATA_DST_SEL(5) |
3642 WR_CONFIRM));
3643 amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
3644 amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
3645 amdgpu_ring_write(ring, 0);
3646}
3647
3648/* 3613/*
3649 * vm 3614 * vm
3650 * VMID 0 is the physical GPU addresses as used by the kernel. 3615 * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -3663,6 +3628,13 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3663 unsigned vm_id, uint64_t pd_addr) 3628 unsigned vm_id, uint64_t pd_addr)
3664{ 3629{
3665 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); 3630 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
3631 if (usepfp) {
 3632 /* sync CE with ME to prevent CE fetch CEIB before context switch done */
3633 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3634 amdgpu_ring_write(ring, 0);
3635 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3636 amdgpu_ring_write(ring, 0);
3637 }
3666 3638
3667 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 3639 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3668 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | 3640 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -3703,7 +3675,10 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3703 amdgpu_ring_write(ring, 0x0); 3675 amdgpu_ring_write(ring, 0x0);
3704 3676
3705 /* sync CE with ME to prevent CE fetch CEIB before context switch done */ 3677 /* sync CE with ME to prevent CE fetch CEIB before context switch done */
3706 gfx_v7_0_ce_sync_me(ring); 3678 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3679 amdgpu_ring_write(ring, 0);
3680 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3681 amdgpu_ring_write(ring, 0);
3707 } 3682 }
3708} 3683}
3709 3684
@@ -3788,7 +3763,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3788 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, 3763 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
3789 AMDGPU_GEM_DOMAIN_VRAM, 3764 AMDGPU_GEM_DOMAIN_VRAM,
3790 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 3765 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
3791 NULL, &adev->gfx.rlc.save_restore_obj); 3766 NULL, NULL,
3767 &adev->gfx.rlc.save_restore_obj);
3792 if (r) { 3768 if (r) {
3793 dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); 3769 dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
3794 return r; 3770 return r;
@@ -3831,7 +3807,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3831 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, 3807 r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
3832 AMDGPU_GEM_DOMAIN_VRAM, 3808 AMDGPU_GEM_DOMAIN_VRAM,
3833 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 3809 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
3834 NULL, &adev->gfx.rlc.clear_state_obj); 3810 NULL, NULL,
3811 &adev->gfx.rlc.clear_state_obj);
3835 if (r) { 3812 if (r) {
3836 dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); 3813 dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
3837 gfx_v7_0_rlc_fini(adev); 3814 gfx_v7_0_rlc_fini(adev);
@@ -3870,7 +3847,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3870 r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, 3847 r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
3871 AMDGPU_GEM_DOMAIN_VRAM, 3848 AMDGPU_GEM_DOMAIN_VRAM,
3872 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 3849 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
3873 NULL, &adev->gfx.rlc.cp_table_obj); 3850 NULL, NULL,
3851 &adev->gfx.rlc.cp_table_obj);
3874 if (r) { 3852 if (r) {
3875 dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); 3853 dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
3876 gfx_v7_0_rlc_fini(adev); 3854 gfx_v7_0_rlc_fini(adev);
@@ -4802,12 +4780,6 @@ static int gfx_v7_0_sw_init(void *handle)
4802 return r; 4780 return r;
4803 } 4781 }
4804 4782
4805 r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs);
4806 if (r) {
4807 DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r);
4808 return r;
4809 }
4810
4811 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 4783 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4812 ring = &adev->gfx.gfx_ring[i]; 4784 ring = &adev->gfx.gfx_ring[i];
4813 ring->ring_obj = NULL; 4785 ring->ring_obj = NULL;
@@ -4851,21 +4823,21 @@ static int gfx_v7_0_sw_init(void *handle)
4851 r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, 4823 r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
4852 PAGE_SIZE, true, 4824 PAGE_SIZE, true,
4853 AMDGPU_GEM_DOMAIN_GDS, 0, 4825 AMDGPU_GEM_DOMAIN_GDS, 0,
4854 NULL, &adev->gds.gds_gfx_bo); 4826 NULL, NULL, &adev->gds.gds_gfx_bo);
4855 if (r) 4827 if (r)
4856 return r; 4828 return r;
4857 4829
4858 r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, 4830 r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
4859 PAGE_SIZE, true, 4831 PAGE_SIZE, true,
4860 AMDGPU_GEM_DOMAIN_GWS, 0, 4832 AMDGPU_GEM_DOMAIN_GWS, 0,
4861 NULL, &adev->gds.gws_gfx_bo); 4833 NULL, NULL, &adev->gds.gws_gfx_bo);
4862 if (r) 4834 if (r)
4863 return r; 4835 return r;
4864 4836
4865 r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, 4837 r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
4866 PAGE_SIZE, true, 4838 PAGE_SIZE, true,
4867 AMDGPU_GEM_DOMAIN_OA, 0, 4839 AMDGPU_GEM_DOMAIN_OA, 0,
4868 NULL, &adev->gds.oa_gfx_bo); 4840 NULL, NULL, &adev->gds.oa_gfx_bo);
4869 if (r) 4841 if (r)
4870 return r; 4842 return r;
4871 4843
@@ -4886,8 +4858,6 @@ static int gfx_v7_0_sw_fini(void *handle)
4886 for (i = 0; i < adev->gfx.num_compute_rings; i++) 4858 for (i = 0; i < adev->gfx.num_compute_rings; i++)
4887 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); 4859 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
4888 4860
4889 amdgpu_wb_free(adev, adev->gfx.ce_sync_offs);
4890
4891 gfx_v7_0_cp_compute_fini(adev); 4861 gfx_v7_0_cp_compute_fini(adev);
4892 gfx_v7_0_rlc_fini(adev); 4862 gfx_v7_0_rlc_fini(adev);
4893 gfx_v7_0_mec_fini(adev); 4863 gfx_v7_0_mec_fini(adev);
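
The removed gfx_v7_0_ce_sync_me() stalled the constant engine by bouncing a magic value through a dedicated writeback slot, which is why the ce_sync_offs allocation is also dropped from sw_init/sw_fini. Its replacement is a pair of SWITCH_BUFFER packets, which force the CE to drain and flip both of its ping-pong buffers so it cannot run ahead into a stale indirect buffer. Wrapped as a hypothetical helper, these are the exact packets emitted in the hunks above:

    static void gfx_emit_ce_me_sync(struct amdgpu_ring *ring)
    {
            /* two switches flush both CE ping-pong buffers */
            amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
            amdgpu_ring_write(ring, 0);
            amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
            amdgpu_ring_write(ring, 0);
    }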
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 53f07439a512..cb4f68f53f24 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -868,7 +868,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
868 r = amdgpu_bo_create(adev, 868 r = amdgpu_bo_create(adev,
869 adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2, 869 adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
870 PAGE_SIZE, true, 870 PAGE_SIZE, true,
871 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, 871 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
872 &adev->gfx.mec.hpd_eop_obj); 872 &adev->gfx.mec.hpd_eop_obj);
873 if (r) { 873 if (r) {
874 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); 874 dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
@@ -940,12 +940,6 @@ static int gfx_v8_0_sw_init(void *handle)
940 return r; 940 return r;
941 } 941 }
942 942
943 r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs);
944 if (r) {
945 DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r);
946 return r;
947 }
948
949 /* set up the gfx ring */ 943 /* set up the gfx ring */
950 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 944 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
951 ring = &adev->gfx.gfx_ring[i]; 945 ring = &adev->gfx.gfx_ring[i];
@@ -995,21 +989,21 @@ static int gfx_v8_0_sw_init(void *handle)
995 /* reserve GDS, GWS and OA resource for gfx */ 989 /* reserve GDS, GWS and OA resource for gfx */
996 r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, 990 r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
997 PAGE_SIZE, true, 991 PAGE_SIZE, true,
998 AMDGPU_GEM_DOMAIN_GDS, 0, 992 AMDGPU_GEM_DOMAIN_GDS, 0, NULL,
999 NULL, &adev->gds.gds_gfx_bo); 993 NULL, &adev->gds.gds_gfx_bo);
1000 if (r) 994 if (r)
1001 return r; 995 return r;
1002 996
1003 r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, 997 r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
1004 PAGE_SIZE, true, 998 PAGE_SIZE, true,
1005 AMDGPU_GEM_DOMAIN_GWS, 0, 999 AMDGPU_GEM_DOMAIN_GWS, 0, NULL,
1006 NULL, &adev->gds.gws_gfx_bo); 1000 NULL, &adev->gds.gws_gfx_bo);
1007 if (r) 1001 if (r)
1008 return r; 1002 return r;
1009 1003
1010 r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, 1004 r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
1011 PAGE_SIZE, true, 1005 PAGE_SIZE, true,
1012 AMDGPU_GEM_DOMAIN_OA, 0, 1006 AMDGPU_GEM_DOMAIN_OA, 0, NULL,
1013 NULL, &adev->gds.oa_gfx_bo); 1007 NULL, &adev->gds.oa_gfx_bo);
1014 if (r) 1008 if (r)
1015 return r; 1009 return r;
@@ -1033,8 +1027,6 @@ static int gfx_v8_0_sw_fini(void *handle)
1033 for (i = 0; i < adev->gfx.num_compute_rings; i++) 1027 for (i = 0; i < adev->gfx.num_compute_rings; i++)
1034 amdgpu_ring_fini(&adev->gfx.compute_ring[i]); 1028 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1035 1029
1036 amdgpu_wb_free(adev, adev->gfx.ce_sync_offs);
1037
1038 gfx_v8_0_mec_fini(adev); 1030 gfx_v8_0_mec_fini(adev);
1039 1031
1040 return 0; 1032 return 0;
@@ -3106,7 +3098,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
3106 sizeof(struct vi_mqd), 3098 sizeof(struct vi_mqd),
3107 PAGE_SIZE, true, 3099 PAGE_SIZE, true,
3108 AMDGPU_GEM_DOMAIN_GTT, 0, NULL, 3100 AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
3109 &ring->mqd_obj); 3101 NULL, &ring->mqd_obj);
3110 if (r) { 3102 if (r) {
3111 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); 3103 dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
3112 return r; 3104 return r;
@@ -3965,6 +3957,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
3965 DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); 3957 DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
3966 amdgpu_ring_write(ring, lower_32_bits(seq)); 3958 amdgpu_ring_write(ring, lower_32_bits(seq));
3967 amdgpu_ring_write(ring, upper_32_bits(seq)); 3959 amdgpu_ring_write(ring, upper_32_bits(seq));
3960
3968} 3961}
3969 3962
3970/** 3963/**
@@ -4005,49 +3998,34 @@ static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring,
4005 return true; 3998 return true;
4006} 3999}
4007 4000
4008static void gfx_v8_0_ce_sync_me(struct amdgpu_ring *ring) 4001static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4002 unsigned vm_id, uint64_t pd_addr)
4009{ 4003{
4010 struct amdgpu_device *adev = ring->adev; 4004 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
4011 u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4; 4005 uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
4012 4006 uint64_t addr = ring->fence_drv.gpu_addr;
4013 /* instruct DE to set a magic number */
4014 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4015 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4016 WRITE_DATA_DST_SEL(5)));
4017 amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
4018 amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
4019 amdgpu_ring_write(ring, 1);
4020 4007
4021 /* let CE wait till condition satisfied */
4022 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 4008 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
4023 amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ 4009 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
4024 WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ 4010 WAIT_REG_MEM_FUNCTION(3))); /* equal */
4025 WAIT_REG_MEM_FUNCTION(3) | /* == */ 4011 amdgpu_ring_write(ring, addr & 0xfffffffc);
4026 WAIT_REG_MEM_ENGINE(2))); /* ce */ 4012 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
4027 amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); 4013 amdgpu_ring_write(ring, seq);
4028 amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
4029 amdgpu_ring_write(ring, 1);
4030 amdgpu_ring_write(ring, 0xffffffff); 4014 amdgpu_ring_write(ring, 0xffffffff);
4031 amdgpu_ring_write(ring, 4); /* poll interval */ 4015 amdgpu_ring_write(ring, 4); /* poll interval */
4032 4016
4033 /* instruct CE to reset wb of ce_sync to zero */ 4017 if (usepfp) {
4034 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4018 /* sync CE with ME to prevent CE fetch CEIB before context switch done */
4035 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | 4019 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4036 WRITE_DATA_DST_SEL(5) | 4020 amdgpu_ring_write(ring, 0);
4037 WR_CONFIRM)); 4021 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4038 amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); 4022 amdgpu_ring_write(ring, 0);
4039 amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); 4023 }
4040 amdgpu_ring_write(ring, 0);
4041}
4042
4043static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4044 unsigned vm_id, uint64_t pd_addr)
4045{
4046 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
4047 4024
4048 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 4025 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4049 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | 4026 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
4050 WRITE_DATA_DST_SEL(0))); 4027 WRITE_DATA_DST_SEL(0)) |
4028 WR_CONFIRM);
4051 if (vm_id < 8) { 4029 if (vm_id < 8) {
4052 amdgpu_ring_write(ring, 4030 amdgpu_ring_write(ring,
4053 (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); 4031 (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
@@ -4083,9 +4061,10 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4083 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 4061 /* sync PFP to ME, otherwise we might get invalid PFP reads */
4084 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 4062 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
4085 amdgpu_ring_write(ring, 0x0); 4063 amdgpu_ring_write(ring, 0x0);
4086 4064 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4087 /* sync CE with ME to prevent CE fetch CEIB before context switch done */ 4065 amdgpu_ring_write(ring, 0);
4088 gfx_v8_0_ce_sync_me(ring); 4066 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4067 amdgpu_ring_write(ring, 0);
4089 } 4068 }
4090} 4069}
4091 4070
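
Beyond the SWITCH_BUFFER conversion, the gfx_v8_0 vm_flush now opens with a WAIT_REG_MEM that polls the ring's fence address until the last submitted sequence number has signalled, guaranteeing all prior work on the ring is done before the page table base registers change. The packet layout, wrapped as a hypothetical helper matching the hunk:

    static void gfx_v8_wait_mem_eq(struct amdgpu_ring *ring, u64 addr, u32 val)
    {
            amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
            amdgpu_ring_write(ring, WAIT_REG_MEM_MEM_SPACE(1) |     /* memory */
                                    WAIT_REG_MEM_FUNCTION(3));      /* equal */
            amdgpu_ring_write(ring, addr & 0xfffffffc);
            amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
            amdgpu_ring_write(ring, val);           /* reference value */
            amdgpu_ring_write(ring, 0xffffffff);    /* compare mask */
            amdgpu_ring_write(ring, 4);             /* poll interval */
    }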
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 774528ab8704..fab5471d25d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1262,6 +1262,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
1262 addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); 1262 addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1263 status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); 1263 status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1264 mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); 1264 mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
1265 /* reset addr and status */
1266 WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1267
1268 if (!addr && !status)
1269 return 0;
1270
1265 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", 1271 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1266 entry->src_id, entry->src_data); 1272 entry->src_id, entry->src_data);
1267 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 1273 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
@@ -1269,8 +1275,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
1269 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 1275 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1270 status); 1276 status);
1271 gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); 1277 gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
1272 /* reset addr and status */
1273 WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1274 1278
1275 return 0; 1279 return 0;
1276} 1280}
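
The fault handler reorder in gmc_v7_0 (and gmc_v8_0 just below) acknowledges the fault first, then drops interrupts where neither an address nor a status was latched, silencing spurious all-zero fault spam; condensed:

    addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
    status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
    WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);   /* ack: reset addr and status */
    if (!addr && !status)
            return 0;                       /* nothing latched, ignore */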
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 9a07742620d0..7bc9e9fcf3d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1262,6 +1262,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
1262 addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); 1262 addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
1263 status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); 1263 status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
1264 mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); 1264 mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
1265 /* reset addr and status */
1266 WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1267
1268 if (!addr && !status)
1269 return 0;
1270
1265 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", 1271 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
1266 entry->src_id, entry->src_data); 1272 entry->src_id, entry->src_data);
1267 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 1273 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
@@ -1269,8 +1275,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
1269 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 1275 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1270 status); 1276 status);
1271 gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client); 1277 gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
1272 /* reset addr and status */
1273 WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
1274 1278
1275 return 0; 1279 return 0;
1276} 1280}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index c900aa942ade..966d4b2ed9da 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -625,7 +625,7 @@ int iceland_smu_init(struct amdgpu_device *adev)
625 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, 625 ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
626 true, AMDGPU_GEM_DOMAIN_VRAM, 626 true, AMDGPU_GEM_DOMAIN_VRAM,
627 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, 627 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
628 NULL, toc_buf); 628 NULL, NULL, toc_buf);
629 if (ret) { 629 if (ret) {
630 DRM_ERROR("Failed to allocate memory for TOC buffer\n"); 630 DRM_ERROR("Failed to allocate memory for TOC buffer\n");
631 return -ENOMEM; 631 return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 94ec04a9c4d5..9745ed3a9aef 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2995,6 +2995,12 @@ static int kv_dpm_late_init(void *handle)
 {
 	/* powerdown unused blocks for now */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int ret;
+
+	/* init the sysfs and debugfs files late */
+	ret = amdgpu_pm_sysfs_init(adev);
+	if (ret)
+		return ret;
 
 	kv_dpm_powergate_acp(adev, true);
 	kv_dpm_powergate_samu(adev, true);
@@ -3038,9 +3044,6 @@ static int kv_dpm_sw_init(void *handle)
 	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
 	if (amdgpu_dpm == 1)
 		amdgpu_pm_print_power_states(adev);
-	ret = amdgpu_pm_sysfs_init(adev);
-	if (ret)
-		goto dpm_failed;
 	mutex_unlock(&adev->pm.mutex);
 	DRM_INFO("amdgpu: dpm initialized\n");
 
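
Note on the kv_dpm hunks: amdgpu_pm_sysfs_init() moves out of sw_init (where it ran under adev->pm.mutex and unwound through the dpm_failed label) into late_init, so the sysfs/debugfs files are only exposed once the device is otherwise up. The resulting function, assembled from the hunk (the trailing return 0 is assumed from the surrounding code, not shown in the diff):

	static int kv_dpm_late_init(void *handle)
	{
		/* powerdown unused blocks for now */
		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
		int ret;

		/* init the sysfs and debugfs files late */
		ret = amdgpu_pm_sysfs_init(adev);
		if (ret)
			return ret;

		kv_dpm_powergate_acp(adev, true);
		kv_dpm_powergate_samu(adev, true);

		return 0;
	}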
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
index 1f5ac941a610..5421309c1862 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
@@ -763,7 +763,7 @@ int tonga_smu_init(struct amdgpu_device *adev)
 	ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
 			       true, AMDGPU_GEM_DOMAIN_VRAM,
 			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			       NULL, toc_buf);
+			       NULL, NULL, toc_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for TOC buffer\n");
 		return -ENOMEM;
@@ -773,7 +773,7 @@ int tonga_smu_init(struct amdgpu_device *adev)
 	ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
 			       true, AMDGPU_GEM_DOMAIN_VRAM,
 			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-			       NULL, smu_buf);
+			       NULL, NULL, smu_buf);
 	if (ret) {
 		DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 5fac5da694f0..ed50dd725788 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = uvd_v4_2_hw_fini(adev);
+	r = amdgpu_uvd_suspend(adev);
 	if (r)
 		return r;
 
-	r = amdgpu_uvd_suspend(adev);
+	r = uvd_v4_2_hw_fini(adev);
 	if (r)
 		return r;
 
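
Note on the uvd_v4_2 hunk: the two calls swap so that amdgpu_uvd_suspend() runs while the UVD block is still powered, presumably to capture the firmware/session state before hw_fini stops the engine. Reconstructed result:

	static int uvd_v4_2_suspend(void *handle)
	{
		int r;
		struct amdgpu_device *adev = (struct amdgpu_device *)handle;

		r = amdgpu_uvd_suspend(adev);	/* save UVD state first */
		if (r)
			return r;

		r = uvd_v4_2_hw_fini(adev);	/* then stop the hardware */
		if (r)
			return r;

		return r;
	}

The uvd_v5_0 hunk below makes the identical swap.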
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 2d5c59c318af..9ad8b9906c0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = uvd_v5_0_hw_fini(adev);
+	r = amdgpu_uvd_suspend(adev);
 	if (r)
 		return r;
 
-	r = amdgpu_uvd_suspend(adev);
+	r = uvd_v5_0_hw_fini(adev);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index d9f553fce531..7e9934fa4193 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -214,14 +214,16 @@ static int uvd_v6_0_suspend(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	/* Skip this for APU for now */
+	if (!(adev->flags & AMD_IS_APU)) {
+		r = amdgpu_uvd_suspend(adev);
+		if (r)
+			return r;
+	}
 	r = uvd_v6_0_hw_fini(adev);
 	if (r)
 		return r;
 
-	r = amdgpu_uvd_suspend(adev);
-	if (r)
-		return r;
-
 	return r;
 }
 
@@ -230,10 +232,12 @@ static int uvd_v6_0_resume(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = amdgpu_uvd_resume(adev);
-	if (r)
-		return r;
-
+	/* Skip this for APU for now */
+	if (!(adev->flags & AMD_IS_APU)) {
+		r = amdgpu_uvd_resume(adev);
+		if (r)
+			return r;
+	}
 	r = uvd_v6_0_hw_init(adev);
 	if (r)
 		return r;
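
Note on the uvd_v6_0 hunks: both suspend and resume now skip the UVD state save/restore on APUs, gated on the asic flags; the "for now" comment marks this as a workaround rather than a final design. The AMD_IS_APU bit comes in through the PCI id table, roughly like this sketch (illustrative entry, not quoted from amdgpu_drv.c):

	/* Sketch: each PCI id carries chip flags in driver_data, and APU
	 * parts OR in AMD_IS_APU, which probe copies into adev->flags. */
	static const struct pci_device_id pciidlist_sketch[] = {
		{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		 CHIP_CARRIZO | AMD_IS_APU},
		{0, 0, 0}
	};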
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 552d9e75ad1b..0bac8702e934 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1005,6 +1005,9 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
 	u32 mask;
 	int ret;
 
+	if (pci_is_root_bus(adev->pdev->bus))
+		return;
+
 	if (amdgpu_pcie_gen2 == 0)
 		return;
 
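
Note on the vi_pcie_gen3_enable hunk: gen3 link training negotiates with the upstream bridge, so when the GPU sits directly on the root bus (no parent port, as with APUs or some passthrough configurations) there is nothing to negotiate with and the function now bails out first. pci_is_root_bus() reduces to a parent check; paraphrased from include/linux/pci.h:

	/* A bus is a root bus when no bus sits above it. */
	static inline bool pci_is_root_bus(struct pci_bus *pbus)
	{
		return !(pbus->parent);
	}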
@@ -1400,7 +1403,8 @@ static int vi_common_early_init(void *handle)
 	case CHIP_CARRIZO:
 		adev->has_uvd = true;
 		adev->cg_flags = 0;
-		adev->pg_flags = AMDGPU_PG_SUPPORT_UVD | AMDGPU_PG_SUPPORT_VCE;
+		/* Disable UVD pg */
+		adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
 		adev->external_rev_id = adev->rev_id + 0x1;
 		if (amdgpu_smc_load_fw && smc_enabled)
 			adev->firmware.smu_load = true;
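
Note on the vi_common_early_init hunk: AMDGPU_PG_SUPPORT_UVD is commented out rather than deleted, documenting that UVD powergating is deliberately disabled on Carrizo for now. Consumers no-op when the bit is missing from the per-ASIC mask; a sketch of that pattern (the hook shape is assumed from the UVD IP code, not quoted):

	static int uvd_set_powergating_state_sketch(void *handle,
						    enum amd_powergating_state state)
	{
		struct amdgpu_device *adev = (struct amdgpu_device *)handle;

		if (!(adev->pg_flags & AMDGPU_PG_SUPPORT_UVD))
			return 0;	/* UVD powergating disabled on this ASIC */

		/* gate or ungate the UVD block here */
		return 0;
	}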