path: root/drivers/gpu/drm/amd/amdgpu
author    Arindam Nath <arindam.nath@amd.com>        2016-04-12 07:46:15 -0400
committer Alex Deucher <alexander.deucher@amd.com>   2016-05-04 20:20:23 -0400
commit    c036554170fcc2238c32a7edd72c1b61b886428a (patch)
tree      2832729c8191be1a811c08b8d17d011dd437be31 /drivers/gpu/drm/amd/amdgpu
parent    aeba709a15ad66d3f8a2b38bada9f643ebe3dc04 (diff)
drm/amdgpu: handle more than 10 UVD sessions (v2)
Change History
--------------

v2:
- Make the firmware version check correct. Firmware versions >= 1.80
  should all support 40 UVD instances.
- Replace AMDGPU_MAX_UVD_HANDLES with a max_handles variable.

v1:
- The firmware can handle up to 40 UVD sessions.

Signed-off-by: Arindam Nath <arindam.nath@amd.com>
Signed-off-by: Ayyappa Chandolu <ayyappa.chandolu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
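For reference, the version gate this change adds to amdgpu_uvd_sw_init() boils down to the small check sketched below. This is a stand-alone illustration rather than driver code: the decoding of the major and minor fields from ucode_version (bits 24-31 and 8-15) is taken from the surrounding driver code rather than from this diff, and 0x01/0x50 is simply firmware version 1.80.

#include <stdint.h>
#include <stdio.h>

#define AMDGPU_DEFAULT_UVD_HANDLES	10
#define AMDGPU_MAX_UVD_HANDLES		40

/* Firmware 1.80 and newer handles 40 UVD sessions, older firmware only 10. */
static unsigned uvd_max_handles(uint32_t ucode_version)
{
	unsigned version_major = (ucode_version >> 24) & 0xff;
	unsigned version_minor = (ucode_version >> 8) & 0xff;

	if ((version_major > 0x01) ||
	    ((version_major == 0x01) && (version_minor >= 0x50)))
		return AMDGPU_MAX_UVD_HANDLES;

	return AMDGPU_DEFAULT_UVD_HANDLES;
}

int main(void)
{
	/* 0x01005000: major 0x01, minor 0x50, i.e. firmware 1.80 */
	printf("1.80 -> %u handles\n", uvd_max_handles(0x01005000));
	/* 0x01004000: firmware 1.64, keeps the old limit */
	printf("1.64 -> %u handles\n", uvd_max_handles(0x01004000));
	return 0;
}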
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h      | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c    |  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c    |  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c    |  7
5 files changed, 40 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d1ad7634f351..c9fe2d56cebf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1593,16 +1593,19 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev);
 /*
  * UVD
  */
-#define AMDGPU_MAX_UVD_HANDLES	10
-#define AMDGPU_UVD_STACK_SIZE	(1024*1024)
-#define AMDGPU_UVD_HEAP_SIZE	(1024*1024)
-#define AMDGPU_UVD_FIRMWARE_OFFSET	256
+#define AMDGPU_DEFAULT_UVD_HANDLES	10
+#define AMDGPU_MAX_UVD_HANDLES		40
+#define AMDGPU_UVD_STACK_SIZE		(200*1024)
+#define AMDGPU_UVD_HEAP_SIZE		(256*1024)
+#define AMDGPU_UVD_SESSION_SIZE		(50*1024)
+#define AMDGPU_UVD_FIRMWARE_OFFSET	256
 
 struct amdgpu_uvd {
 	struct amdgpu_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
 	void			*saved_bo;
+	unsigned		max_handles;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
 	struct delayed_work	idle_work;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 338da80006b6..76ebc109e5e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -151,6 +151,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 		return r;
 	}
 
+	/* Set the default UVD handles that the firmware can handle */
+	adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;
+
 	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
 	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
 	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
@@ -158,8 +161,19 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
 		version_major, version_minor, family_id);
 
+	/*
+	 * Limit the number of UVD handles depending on microcode major
+	 * and minor versions. The firmware version which has 40 UVD
+	 * instances support is 1.80. So all subsequent versions should
+	 * also have the same support.
+	 */
+	if ((version_major > 0x01) ||
+	    ((version_major == 0x01) && (version_minor >= 0x50)))
+		adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
 	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
-		  + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
+		  + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
+		  + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
 	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
@@ -202,7 +216,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 		return r;
 	}
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+	for (i = 0; i < adev->uvd.max_handles; ++i) {
 		atomic_set(&adev->uvd.handles[i], 0);
 		adev->uvd.filp[i] = NULL;
 	}
@@ -248,7 +262,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 	if (adev->uvd.vcpu_bo == NULL)
 		return 0;
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+	for (i = 0; i < adev->uvd.max_handles; ++i)
 		if (atomic_read(&adev->uvd.handles[i]))
 			break;
 
@@ -303,7 +317,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 	struct amdgpu_ring *ring = &adev->uvd.ring;
 	int i, r;
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+	for (i = 0; i < adev->uvd.max_handles; ++i) {
 		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
 		if (handle != 0 && adev->uvd.filp[i] == filp) {
 			struct fence *fence;
@@ -563,7 +577,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 		amdgpu_bo_kunmap(bo);
 
 		/* try to alloc a new handle */
-		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+		for (i = 0; i < adev->uvd.max_handles; ++i) {
 			if (atomic_read(&adev->uvd.handles[i]) == handle) {
 				DRM_ERROR("Handle 0x%x already in use!\n", handle);
 				return -EINVAL;
@@ -586,7 +600,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 			return r;
 
 		/* validate the handle */
-		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+		for (i = 0; i < adev->uvd.max_handles; ++i) {
 			if (atomic_read(&adev->uvd.handles[i]) == handle) {
 				if (adev->uvd.filp[i] != ctx->parser->filp) {
 					DRM_ERROR("UVD handle collision detected!\n");
@@ -601,7 +615,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
 	case 2:
 		/* it's a destroy msg, free the handle */
-		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+		for (i = 0; i < adev->uvd.max_handles; ++i)
 			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
 		amdgpu_bo_kunmap(bo);
 		return 0;
@@ -1013,7 +1027,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 
 	fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+	for (i = 0; i < adev->uvd.max_handles; ++i)
 		if (atomic_read(&adev->uvd.handles[i]))
 			++handles;
 
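To put the bo_size change in the amdgpu_uvd.c hunks above in perspective, here is a rough stand-alone estimate of how the VCPU buffer is now carved up. The firmware image size below is only a placeholder (the real value comes from the ucode header and is GPU-page aligned), so treat the totals as indicative rather than exact.

#include <stdio.h>

#define AMDGPU_UVD_STACK_SIZE	(200*1024)
#define AMDGPU_UVD_HEAP_SIZE	(256*1024)
#define AMDGPU_UVD_SESSION_SIZE	(50*1024)

int main(void)
{
	unsigned long fw_size = 256*1024;	/* placeholder firmware image size */
	unsigned long max_handles = 40;		/* firmware >= 1.80 */
	unsigned long sessions = AMDGPU_UVD_SESSION_SIZE * max_handles;
	unsigned long bo_size = fw_size + AMDGPU_UVD_HEAP_SIZE +
				AMDGPU_UVD_STACK_SIZE + sessions;

	printf("firmware image  : %4lu KiB\n", fw_size / 1024);
	printf("heap            : %4lu KiB\n", AMDGPU_UVD_HEAP_SIZE / 1024UL);
	printf("stack + sessions: %4lu KiB (40 x 50 KiB session state)\n",
	       (AMDGPU_UVD_STACK_SIZE + sessions) / 1024);
	/* ~2712 KiB in total, versus the ~2304 KiB the old fixed layout
	 * (256 KiB fw + 1 MiB stack + 1 MiB heap) always allocated. */
	printf("total bo_size   : %4lu KiB\n", bo_size / 1024);
	return 0;
}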
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index cb463753115b..0d6b9e2150cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -559,12 +559,13 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
 	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
 
 	addr += size;
-	size = AMDGPU_UVD_STACK_SIZE >> 3;
+	size = AMDGPU_UVD_HEAP_SIZE >> 3;
 	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
 	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
 
 	addr += size;
-	size = AMDGPU_UVD_HEAP_SIZE >> 3;
+	size = (AMDGPU_UVD_STACK_SIZE +
+	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
 	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
 	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index de459c8000a7..84abf89ef4f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -272,12 +272,13 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
 	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
 
 	offset += size;
-	size = AMDGPU_UVD_STACK_SIZE;
+	size = AMDGPU_UVD_HEAP_SIZE;
 	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
 	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
 
 	offset += size;
-	size = AMDGPU_UVD_HEAP_SIZE;
+	size = AMDGPU_UVD_STACK_SIZE +
+		(AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
 	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
 	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 372d70a0daec..c633b1a26a7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -272,18 +272,21 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
 	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
 
 	offset += size;
-	size = AMDGPU_UVD_STACK_SIZE;
+	size = AMDGPU_UVD_HEAP_SIZE;
 	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
 	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
 
 	offset += size;
-	size = AMDGPU_UVD_HEAP_SIZE;
+	size = AMDGPU_UVD_STACK_SIZE +
+		(AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
 	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
 	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
 
 	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+
+	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
 }
 
 #if 0