diff options
| author | Oded Gabbay <oded.gabbay@amd.com> | 2014-12-29 06:52:22 -0500 |
|---|---|---|
| committer | Oded Gabbay <oded.gabbay@amd.com> | 2015-01-06 12:44:26 -0500 |
| commit | 524a640444ae19593dd4e9e80075041c1ed831bd (patch) | |
| tree | e485def24cc1bb323fa8916d4c9af790caf52922 | |
| parent | 2030664b709caa769f2b6a1d2e71d8cb343c6884 (diff) | |
drm/amdkfd: Do copy_to/from_user in general kfd_ioctl()
This patch moves the copy_to_user() and copy_from_user() calls from the
different ioctl functions in amdkfd to the general kfd_ioctl() function, as
this is common code for all ioctls.
This was done according to example taken from drm_ioctl.c
Signed-off-by: Oded Gabbay <oded.gabbay@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
| -rw-r--r-- | drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 234 |
1 file changed, 117 insertions(+), 117 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index fe5c543599b0..249f4921f4a8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | |||
| @@ -126,17 +126,14 @@ static int kfd_open(struct inode *inode, struct file *filep) | |||
| 126 | return 0; | 126 | return 0; |
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p, | 129 | static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p, |
| 130 | void __user *arg) | 130 | void *data) |
| 131 | { | 131 | { |
| 132 | struct kfd_ioctl_get_version_args args; | 132 | struct kfd_ioctl_get_version_args *args = data; |
| 133 | int err = 0; | 133 | int err = 0; |
| 134 | 134 | ||
| 135 | args.major_version = KFD_IOCTL_MAJOR_VERSION; | 135 | args->major_version = KFD_IOCTL_MAJOR_VERSION; |
| 136 | args.minor_version = KFD_IOCTL_MINOR_VERSION; | 136 | args->minor_version = KFD_IOCTL_MINOR_VERSION; |
| 137 | |||
| 138 | if (copy_to_user(arg, &args, sizeof(args))) | ||
| 139 | err = -EFAULT; | ||
| 140 | 137 | ||
| 141 | return err; | 138 | return err; |
| 142 | } | 139 | } |
| @@ -220,10 +217,10 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties, | |||
| 220 | return 0; | 217 | return 0; |
| 221 | } | 218 | } |
| 222 | 219 | ||
| 223 | static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, | 220 | static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, |
| 224 | void __user *arg) | 221 | void *data) |
| 225 | { | 222 | { |
| 226 | struct kfd_ioctl_create_queue_args args; | 223 | struct kfd_ioctl_create_queue_args *args = data; |
| 227 | struct kfd_dev *dev; | 224 | struct kfd_dev *dev; |
| 228 | int err = 0; | 225 | int err = 0; |
| 229 | unsigned int queue_id; | 226 | unsigned int queue_id; |
| @@ -232,16 +229,13 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, | |||
| 232 | 229 | ||
| 233 | memset(&q_properties, 0, sizeof(struct queue_properties)); | 230 | memset(&q_properties, 0, sizeof(struct queue_properties)); |
| 234 | 231 | ||
| 235 | if (copy_from_user(&args, arg, sizeof(args))) | ||
| 236 | return -EFAULT; | ||
| 237 | |||
| 238 | pr_debug("kfd: creating queue ioctl\n"); | 232 | pr_debug("kfd: creating queue ioctl\n"); |
| 239 | 233 | ||
| 240 | err = set_queue_properties_from_user(&q_properties, &args); | 234 | err = set_queue_properties_from_user(&q_properties, args); |
| 241 | if (err) | 235 | if (err) |
| 242 | return err; | 236 | return err; |
| 243 | 237 | ||
| 244 | dev = kfd_device_by_id(args.gpu_id); | 238 | dev = kfd_device_by_id(args->gpu_id); |
| 245 | if (dev == NULL) | 239 | if (dev == NULL) |
| 246 | return -EINVAL; | 240 | return -EINVAL; |
| 247 | 241 | ||
| @@ -249,7 +243,7 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, | |||
| 249 | 243 | ||
| 250 | pdd = kfd_bind_process_to_device(dev, p); | 244 | pdd = kfd_bind_process_to_device(dev, p); |
| 251 | if (IS_ERR(pdd)) { | 245 | if (IS_ERR(pdd)) { |
| 252 | err = PTR_ERR(pdd); | 246 | err = -ESRCH; |
| 253 | goto err_bind_process; | 247 | goto err_bind_process; |
| 254 | } | 248 | } |
| 255 | 249 | ||
| @@ -262,33 +256,26 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, | |||
| 262 | if (err != 0) | 256 | if (err != 0) |
| 263 | goto err_create_queue; | 257 | goto err_create_queue; |
| 264 | 258 | ||
| 265 | args.queue_id = queue_id; | 259 | args->queue_id = queue_id; |
| 266 | 260 | ||
| 267 | /* Return gpu_id as doorbell offset for mmap usage */ | 261 | /* Return gpu_id as doorbell offset for mmap usage */ |
| 268 | args.doorbell_offset = args.gpu_id << PAGE_SHIFT; | 262 | args->doorbell_offset = args->gpu_id << PAGE_SHIFT; |
| 269 | |||
| 270 | if (copy_to_user(arg, &args, sizeof(args))) { | ||
| 271 | err = -EFAULT; | ||
| 272 | goto err_copy_args_out; | ||
| 273 | } | ||
| 274 | 263 | ||
| 275 | mutex_unlock(&p->mutex); | 264 | mutex_unlock(&p->mutex); |
| 276 | 265 | ||
| 277 | pr_debug("kfd: queue id %d was created successfully\n", args.queue_id); | 266 | pr_debug("kfd: queue id %d was created successfully\n", args->queue_id); |
| 278 | 267 | ||
| 279 | pr_debug("ring buffer address == 0x%016llX\n", | 268 | pr_debug("ring buffer address == 0x%016llX\n", |
| 280 | args.ring_base_address); | 269 | args->ring_base_address); |
| 281 | 270 | ||
| 282 | pr_debug("read ptr address == 0x%016llX\n", | 271 | pr_debug("read ptr address == 0x%016llX\n", |
| 283 | args.read_pointer_address); | 272 | args->read_pointer_address); |
| 284 | 273 | ||
| 285 | pr_debug("write ptr address == 0x%016llX\n", | 274 | pr_debug("write ptr address == 0x%016llX\n", |
| 286 | args.write_pointer_address); | 275 | args->write_pointer_address); |
| 287 | 276 | ||
| 288 | return 0; | 277 | return 0; |
| 289 | 278 | ||
| 290 | err_copy_args_out: | ||
| 291 | pqm_destroy_queue(&p->pqm, queue_id); | ||
| 292 | err_create_queue: | 279 | err_create_queue: |
| 293 | err_bind_process: | 280 | err_bind_process: |
| 294 | mutex_unlock(&p->mutex); | 281 | mutex_unlock(&p->mutex); |
| @@ -296,99 +283,90 @@ err_bind_process: | |||
| 296 | } | 283 | } |
| 297 | 284 | ||
| 298 | static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p, | 285 | static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p, |
| 299 | void __user *arg) | 286 | void *data) |
| 300 | { | 287 | { |
| 301 | int retval; | 288 | int retval; |
| 302 | struct kfd_ioctl_destroy_queue_args args; | 289 | struct kfd_ioctl_destroy_queue_args *args = data; |
| 303 | |||
| 304 | if (copy_from_user(&args, arg, sizeof(args))) | ||
| 305 | return -EFAULT; | ||
| 306 | 290 | ||
| 307 | pr_debug("kfd: destroying queue id %d for PASID %d\n", | 291 | pr_debug("kfd: destroying queue id %d for PASID %d\n", |
| 308 | args.queue_id, | 292 | args->queue_id, |
| 309 | p->pasid); | 293 | p->pasid); |
| 310 | 294 | ||
| 311 | mutex_lock(&p->mutex); | 295 | mutex_lock(&p->mutex); |
| 312 | 296 | ||
| 313 | retval = pqm_destroy_queue(&p->pqm, args.queue_id); | 297 | retval = pqm_destroy_queue(&p->pqm, args->queue_id); |
| 314 | 298 | ||
| 315 | mutex_unlock(&p->mutex); | 299 | mutex_unlock(&p->mutex); |
| 316 | return retval; | 300 | return retval; |
| 317 | } | 301 | } |
| 318 | 302 | ||
| 319 | static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, | 303 | static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, |
| 320 | void __user *arg) | 304 | void *data) |
| 321 | { | 305 | { |
| 322 | int retval; | 306 | int retval; |
| 323 | struct kfd_ioctl_update_queue_args args; | 307 | struct kfd_ioctl_update_queue_args *args = data; |
| 324 | struct queue_properties properties; | 308 | struct queue_properties properties; |
| 325 | 309 | ||
| 326 | if (copy_from_user(&args, arg, sizeof(args))) | 310 | if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { |
| 327 | return -EFAULT; | ||
| 328 | |||
| 329 | if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { | ||
| 330 | pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n"); | 311 | pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n"); |
| 331 | return -EINVAL; | 312 | return -EINVAL; |
| 332 | } | 313 | } |
| 333 | 314 | ||
| 334 | if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) { | 315 | if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) { |
| 335 | pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n"); | 316 | pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n"); |
| 336 | return -EINVAL; | 317 | return -EINVAL; |
| 337 | } | 318 | } |
| 338 | 319 | ||
| 339 | if ((args.ring_base_address) && | 320 | if ((args->ring_base_address) && |
| 340 | (!access_ok(VERIFY_WRITE, | 321 | (!access_ok(VERIFY_WRITE, |
| 341 | (const void __user *) args.ring_base_address, | 322 | (const void __user *) args->ring_base_address, |
| 342 | sizeof(uint64_t)))) { | 323 | sizeof(uint64_t)))) { |
| 343 | pr_err("kfd: can't access ring base address\n"); | 324 | pr_err("kfd: can't access ring base address\n"); |
| 344 | return -EFAULT; | 325 | return -EFAULT; |
| 345 | } | 326 | } |
| 346 | 327 | ||
| 347 | if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) { | 328 | if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) { |
| 348 | pr_err("kfd: ring size must be a power of 2 or 0\n"); | 329 | pr_err("kfd: ring size must be a power of 2 or 0\n"); |
| 349 | return -EINVAL; | 330 | return -EINVAL; |
| 350 | } | 331 | } |
| 351 | 332 | ||
| 352 | properties.queue_address = args.ring_base_address; | 333 | properties.queue_address = args->ring_base_address; |
| 353 | properties.queue_size = args.ring_size; | 334 | properties.queue_size = args->ring_size; |
| 354 | properties.queue_percent = args.queue_percentage; | 335 | properties.queue_percent = args->queue_percentage; |
| 355 | properties.priority = args.queue_priority; | 336 | properties.priority = args->queue_priority; |
| 356 | 337 | ||
| 357 | pr_debug("kfd: updating queue id %d for PASID %d\n", | 338 | pr_debug("kfd: updating queue id %d for PASID %d\n", |
| 358 | args.queue_id, p->pasid); | 339 | args->queue_id, p->pasid); |
| 359 | 340 | ||
| 360 | mutex_lock(&p->mutex); | 341 | mutex_lock(&p->mutex); |
| 361 | 342 | ||
| 362 | retval = pqm_update_queue(&p->pqm, args.queue_id, &properties); | 343 | retval = pqm_update_queue(&p->pqm, args->queue_id, &properties); |
| 363 | 344 | ||
| 364 | mutex_unlock(&p->mutex); | 345 | mutex_unlock(&p->mutex); |
| 365 | 346 | ||
| 366 | return retval; | 347 | return retval; |
| 367 | } | 348 | } |
| 368 | 349 | ||
| 369 | static long kfd_ioctl_set_memory_policy(struct file *filep, | 350 | static int kfd_ioctl_set_memory_policy(struct file *filep, |
| 370 | struct kfd_process *p, void __user *arg) | 351 | struct kfd_process *p, void *data) |
| 371 | { | 352 | { |
| 372 | struct kfd_ioctl_set_memory_policy_args args; | 353 | struct kfd_ioctl_set_memory_policy_args *args = data; |
| 373 | struct kfd_dev *dev; | 354 | struct kfd_dev *dev; |
| 374 | int err = 0; | 355 | int err = 0; |
| 375 | struct kfd_process_device *pdd; | 356 | struct kfd_process_device *pdd; |
| 376 | enum cache_policy default_policy, alternate_policy; | 357 | enum cache_policy default_policy, alternate_policy; |
| 377 | 358 | ||
| 378 | if (copy_from_user(&args, arg, sizeof(args))) | 359 | if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT |
| 379 | return -EFAULT; | 360 | && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { |
| 380 | |||
| 381 | if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT | ||
| 382 | && args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { | ||
| 383 | return -EINVAL; | 361 | return -EINVAL; |
| 384 | } | 362 | } |
| 385 | 363 | ||
| 386 | if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT | 364 | if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT |
| 387 | && args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { | 365 | && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { |
| 388 | return -EINVAL; | 366 | return -EINVAL; |
| 389 | } | 367 | } |
| 390 | 368 | ||
| 391 | dev = kfd_device_by_id(args.gpu_id); | 369 | dev = kfd_device_by_id(args->gpu_id); |
| 392 | if (dev == NULL) | 370 | if (dev == NULL) |
| 393 | return -EINVAL; | 371 | return -EINVAL; |
| 394 | 372 | ||
| @@ -396,23 +374,23 @@ static long kfd_ioctl_set_memory_policy(struct file *filep, | |||
| 396 | 374 | ||
| 397 | pdd = kfd_bind_process_to_device(dev, p); | 375 | pdd = kfd_bind_process_to_device(dev, p); |
| 398 | if (IS_ERR(pdd)) { | 376 | if (IS_ERR(pdd)) { |
| 399 | err = PTR_ERR(pdd); | 377 | err = -ESRCH; |
| 400 | goto out; | 378 | goto out; |
| 401 | } | 379 | } |
| 402 | 380 | ||
| 403 | default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT) | 381 | default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT) |
| 404 | ? cache_policy_coherent : cache_policy_noncoherent; | 382 | ? cache_policy_coherent : cache_policy_noncoherent; |
| 405 | 383 | ||
| 406 | alternate_policy = | 384 | alternate_policy = |
| 407 | (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT) | 385 | (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT) |
| 408 | ? cache_policy_coherent : cache_policy_noncoherent; | 386 | ? cache_policy_coherent : cache_policy_noncoherent; |
| 409 | 387 | ||
| 410 | if (!dev->dqm->set_cache_memory_policy(dev->dqm, | 388 | if (!dev->dqm->set_cache_memory_policy(dev->dqm, |
| 411 | &pdd->qpd, | 389 | &pdd->qpd, |
| 412 | default_policy, | 390 | default_policy, |
| 413 | alternate_policy, | 391 | alternate_policy, |
| 414 | (void __user *)args.alternate_aperture_base, | 392 | (void __user *)args->alternate_aperture_base, |
| 415 | args.alternate_aperture_size)) | 393 | args->alternate_aperture_size)) |
| 416 | err = -EINVAL; | 394 | err = -EINVAL; |
| 417 | 395 | ||
| 418 | out: | 396 | out: |
| @@ -421,53 +399,44 @@ out: | |||
| 421 | return err; | 399 | return err; |
| 422 | } | 400 | } |
| 423 | 401 | ||
| 424 | static long kfd_ioctl_get_clock_counters(struct file *filep, | 402 | static int kfd_ioctl_get_clock_counters(struct file *filep, |
| 425 | struct kfd_process *p, void __user *arg) | 403 | struct kfd_process *p, void *data) |
| 426 | { | 404 | { |
| 427 | struct kfd_ioctl_get_clock_counters_args args; | 405 | struct kfd_ioctl_get_clock_counters_args *args = data; |
| 428 | struct kfd_dev *dev; | 406 | struct kfd_dev *dev; |
| 429 | struct timespec time; | 407 | struct timespec time; |
| 430 | 408 | ||
| 431 | if (copy_from_user(&args, arg, sizeof(args))) | 409 | dev = kfd_device_by_id(args->gpu_id); |
| 432 | return -EFAULT; | ||
| 433 | |||
| 434 | dev = kfd_device_by_id(args.gpu_id); | ||
| 435 | if (dev == NULL) | 410 | if (dev == NULL) |
| 436 | return -EINVAL; | 411 | return -EINVAL; |
| 437 | 412 | ||
| 438 | /* Reading GPU clock counter from KGD */ | 413 | /* Reading GPU clock counter from KGD */ |
| 439 | args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd); | 414 | args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd); |
| 440 | 415 | ||
| 441 | /* No access to rdtsc. Using raw monotonic time */ | 416 | /* No access to rdtsc. Using raw monotonic time */ |
| 442 | getrawmonotonic(&time); | 417 | getrawmonotonic(&time); |
| 443 | args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time); | 418 | args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time); |
| 444 | 419 | ||
| 445 | get_monotonic_boottime(&time); | 420 | get_monotonic_boottime(&time); |
| 446 | args.system_clock_counter = (uint64_t)timespec_to_ns(&time); | 421 | args->system_clock_counter = (uint64_t)timespec_to_ns(&time); |
| 447 | 422 | ||
| 448 | /* Since the counter is in nano-seconds we use 1GHz frequency */ | 423 | /* Since the counter is in nano-seconds we use 1GHz frequency */ |
| 449 | args.system_clock_freq = 1000000000; | 424 | args->system_clock_freq = 1000000000; |
| 450 | |||
| 451 | if (copy_to_user(arg, &args, sizeof(args))) | ||
| 452 | return -EFAULT; | ||
| 453 | 425 | ||
| 454 | return 0; | 426 | return 0; |
| 455 | } | 427 | } |
| 456 | 428 | ||
| 457 | 429 | ||
| 458 | static int kfd_ioctl_get_process_apertures(struct file *filp, | 430 | static int kfd_ioctl_get_process_apertures(struct file *filp, |
| 459 | struct kfd_process *p, void __user *arg) | 431 | struct kfd_process *p, void *data) |
| 460 | { | 432 | { |
| 461 | struct kfd_ioctl_get_process_apertures_args args; | 433 | struct kfd_ioctl_get_process_apertures_args *args = data; |
| 462 | struct kfd_process_device_apertures *pAperture; | 434 | struct kfd_process_device_apertures *pAperture; |
| 463 | struct kfd_process_device *pdd; | 435 | struct kfd_process_device *pdd; |
| 464 | 436 | ||
| 465 | dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid); | 437 | dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid); |
| 466 | 438 | ||
| 467 | if (copy_from_user(&args, arg, sizeof(args))) | 439 | args->num_of_nodes = 0; |
| 468 | return -EFAULT; | ||
| 469 | |||
| 470 | args.num_of_nodes = 0; | ||
| 471 | 440 | ||
| 472 | mutex_lock(&p->mutex); | 441 | mutex_lock(&p->mutex); |
| 473 | 442 | ||
| @@ -476,7 +445,8 @@ static int kfd_ioctl_get_process_apertures(struct file *filp, | |||
| 476 | /* Run over all pdd of the process */ | 445 | /* Run over all pdd of the process */ |
| 477 | pdd = kfd_get_first_process_device_data(p); | 446 | pdd = kfd_get_first_process_device_data(p); |
| 478 | do { | 447 | do { |
| 479 | pAperture = &args.process_apertures[args.num_of_nodes]; | 448 | pAperture = |
| 449 | &args->process_apertures[args->num_of_nodes]; | ||
| 480 | pAperture->gpu_id = pdd->dev->id; | 450 | pAperture->gpu_id = pdd->dev->id; |
| 481 | pAperture->lds_base = pdd->lds_base; | 451 | pAperture->lds_base = pdd->lds_base; |
| 482 | pAperture->lds_limit = pdd->lds_limit; | 452 | pAperture->lds_limit = pdd->lds_limit; |
| @@ -486,7 +456,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp, | |||
| 486 | pAperture->scratch_limit = pdd->scratch_limit; | 456 | pAperture->scratch_limit = pdd->scratch_limit; |
| 487 | 457 | ||
| 488 | dev_dbg(kfd_device, | 458 | dev_dbg(kfd_device, |
| 489 | "node id %u\n", args.num_of_nodes); | 459 | "node id %u\n", args->num_of_nodes); |
| 490 | dev_dbg(kfd_device, | 460 | dev_dbg(kfd_device, |
| 491 | "gpu id %u\n", pdd->dev->id); | 461 | "gpu id %u\n", pdd->dev->id); |
| 492 | dev_dbg(kfd_device, | 462 | dev_dbg(kfd_device, |
| @@ -502,23 +472,23 @@ static int kfd_ioctl_get_process_apertures(struct file *filp, | |||
| 502 | dev_dbg(kfd_device, | 472 | dev_dbg(kfd_device, |
| 503 | "scratch_limit %llX\n", pdd->scratch_limit); | 473 | "scratch_limit %llX\n", pdd->scratch_limit); |
| 504 | 474 | ||
| 505 | args.num_of_nodes++; | 475 | args->num_of_nodes++; |
| 506 | } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL && | 476 | } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL && |
| 507 | (args.num_of_nodes < NUM_OF_SUPPORTED_GPUS)); | 477 | (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS)); |
| 508 | } | 478 | } |
| 509 | 479 | ||
| 510 | mutex_unlock(&p->mutex); | 480 | mutex_unlock(&p->mutex); |
| 511 | 481 | ||
| 512 | if (copy_to_user(arg, &args, sizeof(args))) | ||
| 513 | return -EFAULT; | ||
| 514 | |||
| 515 | return 0; | 482 | return 0; |
| 516 | } | 483 | } |
| 517 | 484 | ||
| 518 | static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) | 485 | static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) |
| 519 | { | 486 | { |
| 520 | struct kfd_process *process; | 487 | struct kfd_process *process; |
| 521 | long err = -EINVAL; | 488 | char stack_kdata[128]; |
| 489 | char *kdata = NULL; | ||
| 490 | unsigned int usize, asize; | ||
| 491 | int retcode = -EINVAL; | ||
| 522 | 492 | ||
| 523 | dev_dbg(kfd_device, | 493 | dev_dbg(kfd_device, |
| 524 | "ioctl cmd 0x%x (#%d), arg 0x%lx\n", | 494 | "ioctl cmd 0x%x (#%d), arg 0x%lx\n", |
| @@ -528,54 +498,84 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) | |||
| 528 | if (IS_ERR(process)) | 498 | if (IS_ERR(process)) |
| 529 | return PTR_ERR(process); | 499 | return PTR_ERR(process); |
| 530 | 500 | ||
| 501 | if (cmd & (IOC_IN | IOC_OUT)) { | ||
| 502 | if (asize <= sizeof(stack_kdata)) { | ||
| 503 | kdata = stack_kdata; | ||
| 504 | } else { | ||
| 505 | kdata = kmalloc(asize, GFP_KERNEL); | ||
| 506 | if (!kdata) { | ||
| 507 | retcode = -ENOMEM; | ||
| 508 | goto err_i1; | ||
| 509 | } | ||
| 510 | } | ||
| 511 | if (asize > usize) | ||
| 512 | memset(kdata + usize, 0, asize - usize); | ||
| 513 | } | ||
| 514 | |||
| 515 | if (cmd & IOC_IN) { | ||
| 516 | if (copy_from_user(kdata, (void __user *)arg, usize) != 0) { | ||
| 517 | retcode = -EFAULT; | ||
| 518 | goto err_i1; | ||
| 519 | } | ||
| 520 | } else if (cmd & IOC_OUT) { | ||
| 521 | memset(kdata, 0, usize); | ||
| 522 | } | ||
| 523 | |||
| 524 | |||
| 531 | switch (cmd) { | 525 | switch (cmd) { |
| 532 | case KFD_IOC_GET_VERSION: | 526 | case KFD_IOC_GET_VERSION: |
| 533 | err = kfd_ioctl_get_version(filep, process, (void __user *)arg); | 527 | retcode = kfd_ioctl_get_version(filep, process, kdata); |
| 534 | break; | 528 | break; |
| 535 | case KFD_IOC_CREATE_QUEUE: | 529 | case KFD_IOC_CREATE_QUEUE: |
| 536 | err = kfd_ioctl_create_queue(filep, process, | 530 | retcode = kfd_ioctl_create_queue(filep, process, |
| 537 | (void __user *)arg); | 531 | kdata); |
| 538 | break; | 532 | break; |
| 539 | 533 | ||
| 540 | case KFD_IOC_DESTROY_QUEUE: | 534 | case KFD_IOC_DESTROY_QUEUE: |
| 541 | err = kfd_ioctl_destroy_queue(filep, process, | 535 | retcode = kfd_ioctl_destroy_queue(filep, process, |
| 542 | (void __user *)arg); | 536 | kdata); |
| 543 | break; | 537 | break; |
| 544 | 538 | ||
| 545 | case KFD_IOC_SET_MEMORY_POLICY: | 539 | case KFD_IOC_SET_MEMORY_POLICY: |
| 546 | err = kfd_ioctl_set_memory_policy(filep, process, | 540 | retcode = kfd_ioctl_set_memory_policy(filep, process, |
| 547 | (void __user *)arg); | 541 | kdata); |
| 548 | break; | 542 | break; |
| 549 | 543 | ||
| 550 | case KFD_IOC_GET_CLOCK_COUNTERS: | 544 | case KFD_IOC_GET_CLOCK_COUNTERS: |
| 551 | err = kfd_ioctl_get_clock_counters(filep, process, | 545 | retcode = kfd_ioctl_get_clock_counters(filep, process, |
| 552 | (void __user *)arg); | 546 | kdata); |
| 553 | break; | 547 | break; |
| 554 | 548 | ||
| 555 | case KFD_IOC_GET_PROCESS_APERTURES: | 549 | case KFD_IOC_GET_PROCESS_APERTURES: |
| 556 | err = kfd_ioctl_get_process_apertures(filep, process, | 550 | retcode = kfd_ioctl_get_process_apertures(filep, process, |
| 557 | (void __user *)arg); | 551 | kdata); |
| 558 | break; | 552 | break; |
| 559 | 553 | ||
| 560 | case KFD_IOC_UPDATE_QUEUE: | 554 | case KFD_IOC_UPDATE_QUEUE: |
| 561 | err = kfd_ioctl_update_queue(filep, process, | 555 | retcode = kfd_ioctl_update_queue(filep, process, |
| 562 | (void __user *)arg); | 556 | kdata); |
| 563 | break; | 557 | break; |
| 564 | 558 | ||
| 565 | default: | 559 | default: |
| 566 | dev_err(kfd_device, | 560 | dev_dbg(kfd_device, |
| 567 | "unknown ioctl cmd 0x%x, arg 0x%lx)\n", | 561 | "unknown ioctl cmd 0x%x, arg 0x%lx)\n", |
| 568 | cmd, arg); | 562 | cmd, arg); |
| 569 | err = -EINVAL; | 563 | retcode = -EINVAL; |
| 570 | break; | 564 | break; |
| 571 | } | 565 | } |
| 572 | 566 | ||
| 573 | if (err < 0) | 567 | if (cmd & IOC_OUT) |
| 574 | dev_err(kfd_device, | 568 | if (copy_to_user((void __user *)arg, kdata, usize) != 0) |
| 575 | "ioctl error %ld for ioctl cmd 0x%x (#%d)\n", | 569 | retcode = -EFAULT; |
| 576 | err, cmd, _IOC_NR(cmd)); | ||
| 577 | 570 | ||
| 578 | return err; | 571 | err_i1: |
| 572 | if (kdata != stack_kdata) | ||
| 573 | kfree(kdata); | ||
| 574 | |||
| 575 | if (retcode) | ||
| 576 | dev_dbg(kfd_device, "ret = %d\n", retcode); | ||
| 577 | |||
| 578 | return retcode; | ||
| 579 | } | 579 | } |
| 580 | 580 | ||
| 581 | static int kfd_mmap(struct file *filp, struct vm_area_struct *vma) | 581 | static int kfd_mmap(struct file *filp, struct vm_area_struct *vma) |
