author    Chunming Zhou <David1.Zhou@amd.com>        2016-07-21 05:20:52 -0400
committer Alex Deucher <alexander.deucher@amd.com>   2016-08-22 13:47:21 -0400
commit    53cdccd5200f5051460e778be11cc371474bc3b5
tree      19dce02c758d6c4f283f7bfffb1a9a7b70eafdb6 /drivers/gpu/drm/amd/amdgpu
parent    0c4e7fa56112b0eaef0bf88b569608620e0465b8
drm/amdgpu: recover vram bo from shadow after gpu reset V2
V2: 1. don't directly submit too many jobs at the same time.
    2. delete unrelated printk.

Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 62
1 file changed, 62 insertions(+), 0 deletions(-)
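The V2 note above ("don't directly submit too many jobs at the same time") is visible in the second hunk of the diff: the recovery loop submits the shadow-to-VRAM copy for the current bo, then waits on the previous bo's fence before advancing, so at most one copy job is in flight ahead of the CPU. The following is a minimal standalone sketch of that fence-chaining pattern, not the kernel code itself; submit_copy(), wait_fence() and put_fence() are hypothetical stand-ins for amdgpu_recover_vram_from_shadow(), fence_wait() and fence_put():

/* Pipelined recovery sketch: keep exactly one copy job outstanding.
 * All three helpers below are hypothetical; put_fence(NULL) is
 * assumed to be a no-op, like fence_put(). */
struct fence;
struct bo;

struct fence *submit_copy(struct bo *bo);  /* queue shadow->VRAM copy, return its fence */
int wait_fence(struct fence *f);           /* block until the copy signals */
void put_fence(struct fence *f);           /* drop a fence reference, NULL-safe */

int recover_all(struct bo **bos, unsigned int n)
{
        struct fence *prev = NULL;
        int r = 0;
        unsigned int i;

        for (i = 0; i < n; i++) {
                /* submit copy i before waiting on copy i - 1 */
                struct fence *next = submit_copy(bos[i]);

                if (prev) {
                        r = wait_fence(prev);
                        if (r) {
                                put_fence(next);
                                break;
                        }
                }
                put_fence(prev);
                prev = next;
        }
        if (!r && prev)
                r = wait_fence(prev);      /* drain the last job */
        put_fence(prev);
        return r;
}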
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 76fd2dec7b42..c38dc47cd767 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2060,6 +2060,35 @@ bool amdgpu_need_backup(struct amdgpu_device *adev)
 	return amdgpu_lockup_timeout > 0 ? true : false;
 }
 
+static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
+                                           struct amdgpu_ring *ring,
+                                           struct amdgpu_bo *bo,
+                                           struct fence **fence)
+{
+        uint32_t domain;
+        int r;
+
+        if (!bo->shadow)
+                return 0;
+
+        r = amdgpu_bo_reserve(bo, false);
+        if (r)
+                return r;
+        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+        /* if bo has been evicted, then no need to recover */
+        if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+                r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
+                                                  NULL, fence, true);
+                if (r) {
+                        DRM_ERROR("recover page table failed!\n");
+                        goto err;
+                }
+        }
+err:
+        amdgpu_bo_unreserve(bo);
+        return r;
+}
+
 /**
  * amdgpu_gpu_reset - reset the asic
  *
@@ -2138,13 +2167,46 @@ retry:
 	if (r) {
 		dev_err(adev->dev, "ib ring test failed (%d).\n", r);
 		r = amdgpu_suspend(adev);
+		need_full_reset = true;
 		goto retry;
 	}
+	/*
+	 * recover vm page tables, since we cannot depend on VRAM being
+	 * consistent after a gpu full reset.
+	 */
+	if (need_full_reset && amdgpu_need_backup(adev)) {
+		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+		struct amdgpu_bo *bo, *tmp;
+		struct fence *fence = NULL, *next = NULL;
+
+		DRM_INFO("recover vram bo from shadow\n");
+		mutex_lock(&adev->shadow_list_lock);
+		list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
+			amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
+			if (fence) {
+				r = fence_wait(fence, false);
+				if (r) {
+					WARN(r, "recovery from shadow isn't completed\n");
+					break;
+				}
+			}
 
+			fence_put(fence);
+			fence = next;
+		}
+		mutex_unlock(&adev->shadow_list_lock);
+		if (fence) {
+			r = fence_wait(fence, false);
+			if (r)
+				WARN(r, "recovery from shadow isn't completed\n");
+		}
+		fence_put(fence);
+	}
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
 		if (!ring)
 			continue;
+
 		amd_sched_job_recovery(&ring->sched);
 		kthread_unpark(ring->sched.thread);
 	}
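Two details of the hunks above are easy to miss. In the helper, a bo whose placement is no longer AMDGPU_GEM_DOMAIN_VRAM is skipped: an evicted bo already lives in system memory, so its contents survived the reset, and control simply falls through the err label with r == 0, making the label shared cleanup rather than a pure error path. In the reset path, recovery only runs when need_full_reset is set, since a soft reset leaves VRAM contents intact; the shadow copies being restored here are the GTT-backed twins created by the earlier shadow-bo patches in this series for bos allocated with the AMDGPU_GEM_CREATE_SHADOW flag and tracked on adev->shadow_list.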