author     Felix Kuehling <Felix.Kuehling@amd.com>    2018-03-15 17:27:52 -0400
committer  Oded Gabbay <oded.gabbay@gmail.com>        2018-03-15 17:27:52 -0400
commit     0fc8011f89feb8b2c3008583b777d097e1974660 (patch)
tree       77c0a44e6e9cefad545fc45141680b3b3b1dc7e1
parent     5ec7e02854b3b9b55936c3b44b8acfb85e333f49 (diff)
drm/amdkfd: Kmap event page for dGPUs
The events page must be accessible in user mode by the GPU and CPU as
well as in kernel mode by the CPU. On dGPUs user mode virtual addresses
are managed by the Thunk's GPU memory allocation code. Therefore we
can't allocate the memory in kernel mode like we do on APUs. But KFD
still needs to map the memory for kernel access. To facilitate this,
the Thunk provides the buffer handle of the events page to KFD when
creating the first event.

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
-rw-r--r--   drivers/gpu/drm/amd/amdkfd/kfd_chardev.c   56
-rw-r--r--   drivers/gpu/drm/amd/amdkfd/kfd_events.c    31
-rw-r--r--   drivers/gpu/drm/amd/amdkfd/kfd_priv.h       2
3 files changed, 87 insertions, 2 deletions
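The user-mode half of this handshake is not part of the patch below. As a rough
illustration only, here is a minimal hypothetical sketch of how a Thunk-like
client could pass the event page's buffer handle through the event_page_offset
field of the first AMDKFD_IOC_CREATE_EVENT ioctl. The helper
thunk_alloc_event_page() is a placeholder assumption standing in for the
Thunk's GPU memory allocation path; the ioctl name and the
struct kfd_ioctl_create_event_args fields are the kernel UAPI used by this
patch.

/*
 * Hypothetical user-mode sketch (not part of this patch): create the
 * first KFD event and hand the event page's buffer handle to the kernel
 * through event_page_offset. thunk_alloc_event_page() is a placeholder
 * for the Thunk's GPU memory allocation path.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

/* Placeholder (assumption): returns the KFD buffer handle of the
 * user-allocated events page, encoded as the kernel expects. */
extern uint64_t thunk_alloc_event_page(int kfd_fd, uint32_t gpu_id);

int create_first_event(int kfd_fd, uint32_t gpu_id, uint32_t *event_id)
{
        struct kfd_ioctl_create_event_args args;

        memset(&args, 0, sizeof(args));
        args.event_type = KFD_IOC_EVENT_SIGNAL;
        args.auto_reset = 1;
        args.node_id = gpu_id;
        /* Only the first create-event call carries the page handle;
         * subsequent calls leave event_page_offset at 0. */
        args.event_page_offset = thunk_alloc_event_page(kfd_fd, gpu_id);

        if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &args) < 0) {
                perror("AMDKFD_IOC_CREATE_EVENT");
                return -1;
        }

        *event_id = args.event_id;
        return 0;
}

On later event creations the field stays zero, which is why the kernel hunk
below only maps the page once, while p->signal_page is still unset.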
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index a563ff2ca7dd..ec0b2c0284ec 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -923,6 +923,58 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
 	struct kfd_ioctl_create_event_args *args = data;
 	int err;
 
+	/* For dGPUs the event page is allocated in user mode. The
+	 * handle is passed to KFD with the first call to this IOCTL
+	 * through the event_page_offset field.
+	 */
+	if (args->event_page_offset) {
+		struct kfd_dev *kfd;
+		struct kfd_process_device *pdd;
+		void *mem, *kern_addr;
+		uint64_t size;
+
+		if (p->signal_page) {
+			pr_err("Event page is already set\n");
+			return -EINVAL;
+		}
+
+		kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset));
+		if (!kfd) {
+			pr_err("Getting device by id failed in %s\n", __func__);
+			return -EINVAL;
+		}
+
+		mutex_lock(&p->mutex);
+		pdd = kfd_bind_process_to_device(kfd, p);
+		if (IS_ERR(pdd)) {
+			err = PTR_ERR(pdd);
+			goto out_unlock;
+		}
+
+		mem = kfd_process_device_translate_handle(pdd,
+				GET_IDR_HANDLE(args->event_page_offset));
+		if (!mem) {
+			pr_err("Can't find BO, offset is 0x%llx\n",
+			       args->event_page_offset);
+			err = -EINVAL;
+			goto out_unlock;
+		}
+		mutex_unlock(&p->mutex);
+
+		err = kfd->kfd2kgd->map_gtt_bo_to_kernel(kfd->kgd,
+				mem, &kern_addr, &size);
+		if (err) {
+			pr_err("Failed to map event page to kernel\n");
+			return err;
+		}
+
+		err = kfd_event_page_set(p, kern_addr, size);
+		if (err) {
+			pr_err("Failed to set event page\n");
+			return err;
+		}
+	}
+
 	err = kfd_event_create(filp, p, args->event_type,
 			args->auto_reset != 0, args->node_id,
 			&args->event_id, &args->event_trigger_data,
@@ -930,6 +982,10 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
 			&args->event_slot_index);
 
 	return err;
+
+out_unlock:
+	mutex_unlock(&p->mutex);
+	return err;
 }
 
 static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 6fb9c0d46d63..4890a90f1e44 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -52,6 +52,7 @@ struct kfd_event_waiter {
 struct kfd_signal_page {
 	uint64_t *kernel_address;
 	uint64_t __user *user_address;
+	bool need_to_free_pages;
 };
 
 
@@ -79,6 +80,7 @@ static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
 	       KFD_SIGNAL_EVENT_LIMIT * 8);
 
 	page->kernel_address = backing_store;
+	page->need_to_free_pages = true;
 	pr_debug("Allocated new event signal page at %p, for process %p\n",
 			page, p);
 
@@ -269,8 +271,9 @@ static void shutdown_signal_page(struct kfd_process *p)
 	struct kfd_signal_page *page = p->signal_page;
 
 	if (page) {
-		free_pages((unsigned long)page->kernel_address,
-				get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
+		if (page->need_to_free_pages)
+			free_pages((unsigned long)page->kernel_address,
+				   get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
 		kfree(page);
 	}
 }
@@ -292,6 +295,30 @@ static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
 	return ev->type == KFD_EVENT_TYPE_SIGNAL;
 }
 
+int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
+		       uint64_t size)
+{
+	struct kfd_signal_page *page;
+
+	if (p->signal_page)
+		return -EBUSY;
+
+	page = kzalloc(sizeof(*page), GFP_KERNEL);
+	if (!page)
+		return -ENOMEM;
+
+	/* Initialize all events to unsignaled */
+	memset(kernel_address, (uint8_t) UNSIGNALED_EVENT_SLOT,
+	       KFD_SIGNAL_EVENT_LIMIT * 8);
+
+	page->kernel_address = kernel_address;
+
+	p->signal_page = page;
+	p->signal_mapped_size = size;
+
+	return 0;
+}
+
 int kfd_event_create(struct file *devkfd, struct kfd_process *p,
 		     uint32_t event_type, bool auto_reset, uint32_t node_id,
 		     uint32_t *event_id, uint32_t *event_trigger_data,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 1542807373d7..aa9386356587 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -866,6 +866,8 @@ void kfd_signal_iommu_event(struct kfd_dev *dev,
 void kfd_signal_hw_exception_event(unsigned int pasid);
 int kfd_set_event(struct kfd_process *p, uint32_t event_id);
 int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
+int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
+		       uint64_t size);
 int kfd_event_create(struct file *devkfd, struct kfd_process *p,
 		     uint32_t event_type, bool auto_reset, uint32_t node_id,
 		     uint32_t *event_id, uint32_t *event_trigger_data,