Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 174 ++++++++++++++++++++++++++++++++
 1 file changed, 174 insertions(+), 0 deletions(-)
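
The hunk below relies on the wait-fences uapi defined in include/uapi/drm/amdgpu_drm.h (the diffstat above is limited to amdgpu_cs.c, so that part is not shown). As a rough sketch only, with field names taken from the accesses in the hunk (wait->in.fence_count, wait->out.first_signaled, user->seq_no, ...); the authoritative definitions live in that header:

struct drm_amdgpu_fence {
	__u32 ctx_id;		/* context the job was submitted on */
	__u32 ip_type;		/* e.g. AMDGPU_HW_IP_GFX */
	__u32 ip_instance;
	__u32 ring;
	__u64 seq_no;		/* sequence number returned at submission time */
};

union drm_amdgpu_wait_fences {
	struct {
		__u64 fences;		/* user pointer to an array of drm_amdgpu_fence */
		__u32 fence_count;
		__u32 wait_all;		/* non-zero: wait for all fences, else for any */
		__u64 timeout_ns;
	} in;
	struct {
		__u32 status;		/* set when the wait succeeded before the timeout */
		__u32 first_signaled;	/* index of the first signaled fence (wait-any) */
	} out;
};
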
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index a024217896fd..b4bc83aa5999 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1140,6 +1140,180 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 }
 
 /**
+ * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @user: drm_amdgpu_fence copied from user space
+ */
+static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
+					     struct drm_file *filp,
+					     struct drm_amdgpu_fence *user)
+{
+	struct amdgpu_ring *ring;
+	struct amdgpu_ctx *ctx;
+	struct dma_fence *fence;
+	int r;
+
+	r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
+			       user->ring, &ring);
+	if (r)
+		return ERR_PTR(r);
+
+	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
+	if (ctx == NULL)
+		return ERR_PTR(-EINVAL);
+
+	fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
+	amdgpu_ctx_put(ctx);
+
+	return fence;
+}
+
+/**
+ * amdgpu_cs_wait_all_fences - wait on all fences to signal
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @wait: wait parameters
+ * @fences: array of drm_amdgpu_fence
+ */
+static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
+				     struct drm_file *filp,
+				     union drm_amdgpu_wait_fences *wait,
+				     struct drm_amdgpu_fence *fences)
+{
+	uint32_t fence_count = wait->in.fence_count;
+	unsigned int i;
+	long r = 1;
+
+	for (i = 0; i < fence_count; i++) {
+		struct dma_fence *fence;
+		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
+
+		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
+		if (IS_ERR(fence))
+			return PTR_ERR(fence);
+		else if (!fence)
+			continue;
+
+		r = dma_fence_wait_timeout(fence, true, timeout);
+		if (r < 0)
+			return r;
+
+		if (r == 0)
+			break;
+	}
+
+	memset(wait, 0, sizeof(*wait));
+	wait->out.status = (r > 0);
+
+	return 0;
+}
+
+/**
+ * amdgpu_cs_wait_any_fence - wait on any fence to signal
+ *
+ * @adev: amdgpu device
+ * @filp: file private
+ * @wait: wait parameters
+ * @fences: array of drm_amdgpu_fence
+ */
+static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
+				    struct drm_file *filp,
+				    union drm_amdgpu_wait_fences *wait,
+				    struct drm_amdgpu_fence *fences)
+{
+	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
+	uint32_t fence_count = wait->in.fence_count;
+	uint32_t first = ~0;
+	struct dma_fence **array;
+	unsigned int i;
+	long r;
+
+	/* Prepare the fence array */
+	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);
+
+	if (array == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < fence_count; i++) {
+		struct dma_fence *fence;
+
+		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
+		if (IS_ERR(fence)) {
+			r = PTR_ERR(fence);
+			goto err_free_fence_array;
+		} else if (fence) {
+			array[i] = fence;
+		} else { /* NULL, the fence has been already signaled */
+			r = 1;
+			goto out;
+		}
+	}
+
+	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
+				       &first);
+	if (r < 0)
+		goto err_free_fence_array;
+
+out:
+	memset(wait, 0, sizeof(*wait));
+	wait->out.status = (r > 0);
+	wait->out.first_signaled = first;
+	/* set return value 0 to indicate success */
+	r = 0;
+
+err_free_fence_array:
+	for (i = 0; i < fence_count; i++)
+		dma_fence_put(array[i]);
+	kfree(array);
+
+	return r;
+}
+
+/**
+ * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
+ *
+ * @dev: drm device
+ * @data: data from userspace
+ * @filp: file private
+ */
+int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *filp)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+	union drm_amdgpu_wait_fences *wait = data;
+	uint32_t fence_count = wait->in.fence_count;
+	struct drm_amdgpu_fence *fences_user;
+	struct drm_amdgpu_fence *fences;
+	int r;
+
+	/* Get the fences from userspace */
+	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
+			       GFP_KERNEL);
+	if (fences == NULL)
+		return -ENOMEM;
+
+	fences_user = (void __user *)(unsigned long)(wait->in.fences);
+	if (copy_from_user(fences, fences_user,
+			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
+		r = -EFAULT;
+		goto err_free_fences;
+	}
+
+	if (wait->in.wait_all)
+		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
+	else
+		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
+
+err_free_fences:
+	kfree(fences);
+
+	return r;
+}
+
+/**
  * amdgpu_cs_find_bo_va - find bo_va for VM address
  *
  * @parser: command submission parser context
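
For reference, a minimal userspace sketch of driving the new ioctl through libdrm's drmCommandWriteRead(). This is illustrative only and not part of the patch: fd, ctx_id and the sequence numbers are placeholders for values obtained when the work was submitted, and DRM_AMDGPU_WAIT_FENCES is the command index the ioctl is expected to be dispatched from in amdgpu_drv.c.

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <time.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

/* Wait until both of two earlier submissions on the GFX ring have signaled,
 * with roughly a one second timeout.  Returns 0 on success, -ETIME on
 * timeout, or a negative errno from the ioctl. */
static int wait_two_submissions(int fd, uint32_t ctx_id,
				uint64_t seq_a, uint64_t seq_b)
{
	struct drm_amdgpu_fence fences[2];
	union drm_amdgpu_wait_fences args;
	struct timespec ts;
	int r;

	memset(fences, 0, sizeof(fences));
	fences[0].ctx_id = ctx_id;
	fences[0].ip_type = AMDGPU_HW_IP_GFX;
	fences[0].seq_no = seq_a;
	fences[1] = fences[0];
	fences[1].seq_no = seq_b;

	memset(&args, 0, sizeof(args));
	args.in.fences = (uint64_t)(uintptr_t)fences;	/* copied in by copy_from_user() above */
	args.in.fence_count = 2;
	args.in.wait_all = 1;				/* 0 would take the wait-any path */

	/* amdgpu_gem_timeout() treats timeout_ns as an absolute CLOCK_MONOTONIC
	 * deadline, so add the desired wait to the current time. */
	clock_gettime(CLOCK_MONOTONIC, &ts);
	args.in.timeout_ns = (uint64_t)ts.tv_sec * 1000000000ull +
			     (uint64_t)ts.tv_nsec + 1000000000ull;

	r = drmCommandWriteRead(fd, DRM_AMDGPU_WAIT_FENCES, &args, sizeof(args));
	if (r)
		return r;

	return args.out.status ? 0 : -ETIME;	/* status > 0 means the wait succeeded */
}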