aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2016-11-10 18:28:44 -0500
committerDave Airlie <airlied@redhat.com>2016-11-10 18:28:44 -0500
commit3e91168a6a76f7e21c44f04ebf953589ca59f03c (patch)
tree27b12142f17fc575fd40825b91e7e8773994f13d /drivers/gpu/drm/amd/amdgpu
parentdb8feb6979e91c2e916631a75dbfe9f10f6b05e5 (diff)
parent4b514e10157a8e34a5e909487ef6fb8342e2e3ad (diff)
Merge tag 'topic/drm-misc-2016-11-10' of git://anongit.freedesktop.org/drm-intel into drm-next
- better atomic state debugging from Rob - fence prep from gustavo - sumits flushed out his backlog of pending dma-buf/fence patches from various people - drm_mm leak debugging plus trying to appease Kconfig (Chris) - a few misc things all over * tag 'topic/drm-misc-2016-11-10' of git://anongit.freedesktop.org/drm-intel: (35 commits) drm: Make DRM_DEBUG_MM depend on STACKTRACE_SUPPORT drm/i915: Restrict DRM_DEBUG_MM automatic selection drm: Restrict stackdepot usage to builtin drm.ko drm/msm: module param to dump state on error irq drm/msm/mdp5: add atomic_print_state support drm/atomic: add debugfs file to dump out atomic state drm/atomic: add new drm_debug bit to dump atomic state drm: add helpers to go from plane state to drm_rect drm: add helper for printing to log or seq_file drm: helper macros to print composite types reservation: revert "wait only with non-zero timeout specified (v3)" v2 drm/ttm: fix ttm_bo_wait dma-buf/fence: revert "don't wait when specified timeout is zero" (v2) dma-buf/fence: make timeout handling in fence_default_wait consistent (v2) drm/amdgpu: add the interface of waiting multiple fences (v4) dma-buf: return index of the first signaled fence (v2) MAINTAINERS: update Sync File Framework files dma-buf/sw_sync: put fence reference from the fence creation dma-buf/sw_sync: mark sync_timeline_create() static drm: Add stackdepot include for DRM_DEBUG_MM ...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c174
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c3
4 files changed, 179 insertions, 1 deletion
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2ec7b3baeec2..c2b8496cdf63 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1212,6 +1212,8 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
1212 struct drm_file *filp); 1212 struct drm_file *filp);
1213int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 1213int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
1214int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); 1214int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
1215int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1216 struct drm_file *filp);
1215 1217
1216int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, 1218int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
1217 struct drm_file *filp); 1219 struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index a370101d923f..78da52f90099 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1141,6 +1141,180 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
1141} 1141}
1142 1142
1143/** 1143/**
1144 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
1145 *
1146 * @adev: amdgpu device
1147 * @filp: file private
1148 * @user: drm_amdgpu_fence copied from user space
1149 */
1150static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
1151 struct drm_file *filp,
1152 struct drm_amdgpu_fence *user)
1153{
1154 struct amdgpu_ring *ring;
1155 struct amdgpu_ctx *ctx;
1156 struct dma_fence *fence;
1157 int r;
1158
1159 r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
1160 user->ring, &ring);
1161 if (r)
1162 return ERR_PTR(r);
1163
1164 ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
1165 if (ctx == NULL)
1166 return ERR_PTR(-EINVAL);
1167
1168 fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
1169 amdgpu_ctx_put(ctx);
1170
1171 return fence;
1172}
1173
1174/**
1175 * amdgpu_cs_wait_all_fence - wait on all fences to signal
1176 *
1177 * @adev: amdgpu device
1178 * @filp: file private
1179 * @wait: wait parameters
1180 * @fences: array of drm_amdgpu_fence
1181 */
1182static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
1183 struct drm_file *filp,
1184 union drm_amdgpu_wait_fences *wait,
1185 struct drm_amdgpu_fence *fences)
1186{
1187 uint32_t fence_count = wait->in.fence_count;
1188 unsigned int i;
1189 long r = 1;
1190
1191 for (i = 0; i < fence_count; i++) {
1192 struct dma_fence *fence;
1193 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1194
1195 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1196 if (IS_ERR(fence))
1197 return PTR_ERR(fence);
1198 else if (!fence)
1199 continue;
1200
1201 r = dma_fence_wait_timeout(fence, true, timeout);
1202 if (r < 0)
1203 return r;
1204
1205 if (r == 0)
1206 break;
1207 }
1208
1209 memset(wait, 0, sizeof(*wait));
1210 wait->out.status = (r > 0);
1211
1212 return 0;
1213}
1214
1215/**
1216 * amdgpu_cs_wait_any_fence - wait on any fence to signal
1217 *
1218 * @adev: amdgpu device
1219 * @filp: file private
1220 * @wait: wait parameters
1221 * @fences: array of drm_amdgpu_fence
1222 */
1223static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
1224 struct drm_file *filp,
1225 union drm_amdgpu_wait_fences *wait,
1226 struct drm_amdgpu_fence *fences)
1227{
1228 unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
1229 uint32_t fence_count = wait->in.fence_count;
1230 uint32_t first = ~0;
1231 struct dma_fence **array;
1232 unsigned int i;
1233 long r;
1234
1235 /* Prepare the fence array */
1236 array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
1237
1238 if (array == NULL)
1239 return -ENOMEM;
1240
1241 for (i = 0; i < fence_count; i++) {
1242 struct dma_fence *fence;
1243
1244 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1245 if (IS_ERR(fence)) {
1246 r = PTR_ERR(fence);
1247 goto err_free_fence_array;
1248 } else if (fence) {
1249 array[i] = fence;
1250 } else { /* NULL, the fence has been already signaled */
1251 r = 1;
1252 goto out;
1253 }
1254 }
1255
1256 r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
1257 &first);
1258 if (r < 0)
1259 goto err_free_fence_array;
1260
1261out:
1262 memset(wait, 0, sizeof(*wait));
1263 wait->out.status = (r > 0);
1264 wait->out.first_signaled = first;
1265 /* set return value 0 to indicate success */
1266 r = 0;
1267
1268err_free_fence_array:
1269 for (i = 0; i < fence_count; i++)
1270 dma_fence_put(array[i]);
1271 kfree(array);
1272
1273 return r;
1274}
1275
1276/**
1277 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
1278 *
1279 * @dev: drm device
1280 * @data: data from userspace
1281 * @filp: file private
1282 */
1283int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
1284 struct drm_file *filp)
1285{
1286 struct amdgpu_device *adev = dev->dev_private;
1287 union drm_amdgpu_wait_fences *wait = data;
1288 uint32_t fence_count = wait->in.fence_count;
1289 struct drm_amdgpu_fence *fences_user;
1290 struct drm_amdgpu_fence *fences;
1291 int r;
1292
1293 /* Get the fences from userspace */
1294 fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
1295 GFP_KERNEL);
1296 if (fences == NULL)
1297 return -ENOMEM;
1298
1299 fences_user = (void __user *)(unsigned long)(wait->in.fences);
1300 if (copy_from_user(fences, fences_user,
1301 sizeof(struct drm_amdgpu_fence) * fence_count)) {
1302 r = -EFAULT;
1303 goto err_free_fences;
1304 }
1305
1306 if (wait->in.wait_all)
1307 r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
1308 else
1309 r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
1310
1311err_free_fences:
1312 kfree(fences);
1313
1314 return r;
1315}
1316
1317/**
1144 * amdgpu_cs_find_bo_va - find bo_va for VM address 1318 * amdgpu_cs_find_bo_va - find bo_va for VM address
1145 * 1319 *
1146 * @parser: command submission parser context 1320 * @parser: command submission parser context
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 55c413a55a40..ad908612aff9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -823,6 +823,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
823 DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 823 DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
824 DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 824 DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
825 DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 825 DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
826 DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
826 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 827 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
827 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 828 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
828 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 829 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index fd26c4b8d793..34a795463988 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -361,7 +361,8 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
361 if (count) { 361 if (count) {
362 spin_unlock(&sa_manager->wq.lock); 362 spin_unlock(&sa_manager->wq.lock);
363 t = dma_fence_wait_any_timeout(fences, count, false, 363 t = dma_fence_wait_any_timeout(fences, count, false,
364 MAX_SCHEDULE_TIMEOUT); 364 MAX_SCHEDULE_TIMEOUT,
365 NULL);
365 for (i = 0; i < count; ++i) 366 for (i = 0; i < count; ++i)
366 dma_fence_put(fences[i]); 367 dma_fence_put(fences[i]);
367 368