aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorFelix Kuehling <Felix.Kuehling@amd.com>2017-08-15 23:00:12 -0400
committerOded Gabbay <oded.gabbay@gmail.com>2017-08-15 23:00:12 -0400
commit32fa821958755a30a9a05ed258e1539c945851d4 (patch)
tree5d38eeb79c8922265bac4f33225410b1d83cb697
parent8625ff9c0ba7db32ce4eb25f6032638c1f88c82f (diff)
drm/amdkfd: Handle remaining BUG_ONs more gracefully v2
In most cases, BUG_ONs can be replaced with WARN_ON with an error return. In some void functions just turn them into a WARN_ON and possibly an early exit.

v2:
* Cleaned up error handling in pm_send_unmap_queue
* Removed redundant WARN_ON in kfd_process_destroy_delayed

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c16
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c19
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c20
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c44
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pasid.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c4
14 files changed, 84 insertions, 55 deletions
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index 3841cad0a290..0aa021aa0aa1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -60,7 +60,8 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
60 unsigned int *ib_packet_buff; 60 unsigned int *ib_packet_buff;
61 int status; 61 int status;
62 62
63 BUG_ON(!size_in_bytes); 63 if (WARN_ON(!size_in_bytes))
64 return -EINVAL;
64 65
65 kq = dbgdev->kq; 66 kq = dbgdev->kq;
66 67
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
index 2d5555c5dfae..3da25f7bda6b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
@@ -64,7 +64,8 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
64 enum DBGDEV_TYPE type = DBGDEV_TYPE_DIQ; 64 enum DBGDEV_TYPE type = DBGDEV_TYPE_DIQ;
65 struct kfd_dbgmgr *new_buff; 65 struct kfd_dbgmgr *new_buff;
66 66
67 BUG_ON(!pdev->init_complete); 67 if (WARN_ON(!pdev->init_complete))
68 return false;
68 69
69 new_buff = kfd_alloc_struct(new_buff); 70 new_buff = kfd_alloc_struct(new_buff);
70 if (!new_buff) { 71 if (!new_buff) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 416955f7c7d4..f628ac38e0c8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -98,7 +98,7 @@ static const struct kfd_device_info *lookup_device_info(unsigned short did)
98 98
99 for (i = 0; i < ARRAY_SIZE(supported_devices); i++) { 99 for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
100 if (supported_devices[i].did == did) { 100 if (supported_devices[i].did == did) {
101 BUG_ON(!supported_devices[i].device_info); 101 WARN_ON(!supported_devices[i].device_info);
102 return supported_devices[i].device_info; 102 return supported_devices[i].device_info;
103 } 103 }
104 } 104 }
@@ -212,9 +212,8 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
212 flags); 212 flags);
213 213
214 dev = kfd_device_by_pci_dev(pdev); 214 dev = kfd_device_by_pci_dev(pdev);
215 BUG_ON(!dev); 215 if (!WARN_ON(!dev))
216 216 kfd_signal_iommu_event(dev, pasid, address,
217 kfd_signal_iommu_event(dev, pasid, address,
218 flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC); 217 flags & PPR_FAULT_WRITE, flags & PPR_FAULT_EXEC);
219 218
220 return AMD_IOMMU_INV_PRI_RSP_INVALID; 219 return AMD_IOMMU_INV_PRI_RSP_INVALID;
@@ -397,9 +396,12 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
397{ 396{
398 unsigned int num_of_longs; 397 unsigned int num_of_longs;
399 398
400 BUG_ON(buf_size < chunk_size); 399 if (WARN_ON(buf_size < chunk_size))
401 BUG_ON(buf_size == 0); 400 return -EINVAL;
402 BUG_ON(chunk_size == 0); 401 if (WARN_ON(buf_size == 0))
402 return -EINVAL;
403 if (WARN_ON(chunk_size == 0))
404 return -EINVAL;
403 405
404 kfd->gtt_sa_chunk_size = chunk_size; 406 kfd->gtt_sa_chunk_size = chunk_size;
405 kfd->gtt_sa_num_of_chunks = buf_size / chunk_size; 407 kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 2486dfb5b17f..e553c5e45264 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -388,7 +388,8 @@ static struct mqd_manager *get_mqd_manager_nocpsch(
388{ 388{
389 struct mqd_manager *mqd; 389 struct mqd_manager *mqd;
390 390
391 BUG_ON(type >= KFD_MQD_TYPE_MAX); 391 if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
392 return NULL;
392 393
393 pr_debug("mqd type %d\n", type); 394 pr_debug("mqd type %d\n", type);
394 395
@@ -513,7 +514,7 @@ static void uninitialize_nocpsch(struct device_queue_manager *dqm)
513{ 514{
514 int i; 515 int i;
515 516
516 BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0); 517 WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
517 518
518 kfree(dqm->allocated_queues); 519 kfree(dqm->allocated_queues);
519 for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++) 520 for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
@@ -1129,8 +1130,8 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
1129 dqm->ops.set_cache_memory_policy = set_cache_memory_policy; 1130 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
1130 break; 1131 break;
1131 default: 1132 default:
1132 BUG(); 1133 pr_err("Invalid scheduling policy %d\n", sched_policy);
1133 break; 1134 goto out_free;
1134 } 1135 }
1135 1136
1136 switch (dev->device_info->asic_family) { 1137 switch (dev->device_info->asic_family) {
@@ -1143,12 +1144,12 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
1143 break; 1144 break;
1144 } 1145 }
1145 1146
1146 if (dqm->ops.initialize(dqm)) { 1147 if (!dqm->ops.initialize(dqm))
1147 kfree(dqm); 1148 return dqm;
1148 return NULL;
1149 }
1150 1149
1151 return dqm; 1150out_free:
1151 kfree(dqm);
1152 return NULL;
1152} 1153}
1153 1154
1154void device_queue_manager_uninit(struct device_queue_manager *dqm) 1155void device_queue_manager_uninit(struct device_queue_manager *dqm)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index 43194b43eea4..fadc56a8be71 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -65,7 +65,7 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
65 * for LDS/Scratch and GPUVM. 65 * for LDS/Scratch and GPUVM.
66 */ 66 */
67 67
68 BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE || 68 WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
69 top_address_nybble == 0); 69 top_address_nybble == 0);
70 70
71 return PRIVATE_BASE(top_address_nybble << 12) | 71 return PRIVATE_BASE(top_address_nybble << 12) |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index 47ef910b1663..15e81ae9d2f4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -67,7 +67,7 @@ static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
67 * for LDS/Scratch and GPUVM. 67 * for LDS/Scratch and GPUVM.
68 */ 68 */
69 69
70 BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE || 70 WARN_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
71 top_address_nybble == 0); 71 top_address_nybble == 0);
72 72
73 return top_address_nybble << 12 | 73 return top_address_nybble << 12 |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 970bc07ac370..0e4d4a98dc2b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -41,7 +41,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
41 int retval; 41 int retval;
42 union PM4_MES_TYPE_3_HEADER nop; 42 union PM4_MES_TYPE_3_HEADER nop;
43 43
44 BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ); 44 if (WARN_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ))
45 return false;
45 46
46 pr_debug("Initializing queue type %d size %d\n", KFD_QUEUE_TYPE_HIQ, 47 pr_debug("Initializing queue type %d size %d\n", KFD_QUEUE_TYPE_HIQ,
47 queue_size); 48 queue_size);
@@ -62,8 +63,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
62 KFD_MQD_TYPE_HIQ); 63 KFD_MQD_TYPE_HIQ);
63 break; 64 break;
64 default: 65 default:
65 BUG(); 66 pr_err("Invalid queue type %d\n", type);
66 break; 67 return false;
67 } 68 }
68 69
69 if (!kq->mqd) 70 if (!kq->mqd)
@@ -305,6 +306,7 @@ void kernel_queue_uninit(struct kernel_queue *kq)
305 kfree(kq); 306 kfree(kq);
306} 307}
307 308
309/* FIXME: Can this test be removed? */
308static __attribute__((unused)) void test_kq(struct kfd_dev *dev) 310static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
309{ 311{
310 struct kernel_queue *kq; 312 struct kernel_queue *kq;
@@ -314,10 +316,18 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
314 pr_err("Starting kernel queue test\n"); 316 pr_err("Starting kernel queue test\n");
315 317
316 kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ); 318 kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
317 BUG_ON(!kq); 319 if (unlikely(!kq)) {
320 pr_err(" Failed to initialize HIQ\n");
321 pr_err("Kernel queue test failed\n");
322 return;
323 }
318 324
319 retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer); 325 retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer);
320 BUG_ON(retval != 0); 326 if (unlikely(retval != 0)) {
327 pr_err(" Failed to acquire packet buffer\n");
328 pr_err("Kernel queue test failed\n");
329 return;
330 }
321 for (i = 0; i < 5; i++) 331 for (i = 0; i < 5; i++)
322 buffer[i] = kq->nop_packet; 332 buffer[i] = kq->nop_packet;
323 kq->ops.submit_packet(kq); 333 kq->ops.submit_packet(kq);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index a11477dc1047..7e0ec6bb1637 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -387,7 +387,8 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
387{ 387{
388 struct mqd_manager *mqd; 388 struct mqd_manager *mqd;
389 389
390 BUG_ON(type >= KFD_MQD_TYPE_MAX); 390 if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
391 return NULL;
391 392
392 mqd = kzalloc(sizeof(*mqd), GFP_KERNEL); 393 mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
393 if (!mqd) 394 if (!mqd)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index d638c2c92234..f4c8c2324d77 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -233,7 +233,8 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
233{ 233{
234 struct mqd_manager *mqd; 234 struct mqd_manager *mqd;
235 235
236 BUG_ON(type >= KFD_MQD_TYPE_MAX); 236 if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
237 return NULL;
237 238
238 mqd = kzalloc(sizeof(*mqd), GFP_KERNEL); 239 mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
239 if (!mqd) 240 if (!mqd)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index aacd5a3d92b7..0816d11e469d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -35,7 +35,8 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
35{ 35{
36 unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t); 36 unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);
37 37
38 BUG_ON((temp * sizeof(uint32_t)) > buffer_size_bytes); 38 WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
39 "Runlist IB overflow");
39 *wptr = temp; 40 *wptr = temp;
40} 41}
41 42
@@ -94,7 +95,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
94{ 95{
95 int retval; 96 int retval;
96 97
97 BUG_ON(pm->allocated); 98 if (WARN_ON(pm->allocated))
99 return -EINVAL;
98 100
99 pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription); 101 pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
100 102
@@ -119,7 +121,8 @@ static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
119{ 121{
120 struct pm4_runlist *packet; 122 struct pm4_runlist *packet;
121 123
122 BUG_ON(!ib); 124 if (WARN_ON(!ib))
125 return -EFAULT;
123 126
124 packet = (struct pm4_runlist *)buffer; 127 packet = (struct pm4_runlist *)buffer;
125 128
@@ -211,9 +214,8 @@ static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
211 use_static = false; /* no static queues under SDMA */ 214 use_static = false; /* no static queues under SDMA */
212 break; 215 break;
213 default: 216 default:
214 pr_err("queue type %d\n", q->properties.type); 217 WARN(1, "queue type %d", q->properties.type);
215 BUG(); 218 return -EINVAL;
216 break;
217 } 219 }
218 packet->bitfields3.doorbell_offset = 220 packet->bitfields3.doorbell_offset =
219 q->properties.doorbell_off; 221 q->properties.doorbell_off;
@@ -266,8 +268,8 @@ static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
266 use_static = false; /* no static queues under SDMA */ 268 use_static = false; /* no static queues under SDMA */
267 break; 269 break;
268 default: 270 default:
269 BUG(); 271 WARN(1, "queue type %d", q->properties.type);
270 break; 272 return -EINVAL;
271 } 273 }
272 274
273 packet->mes_map_queues_ordinals[0].bitfields3.doorbell_offset = 275 packet->mes_map_queues_ordinals[0].bitfields3.doorbell_offset =
@@ -392,14 +394,16 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
392 pr_debug("Finished map process and queues to runlist\n"); 394 pr_debug("Finished map process and queues to runlist\n");
393 395
394 if (is_over_subscription) 396 if (is_over_subscription)
395 pm_create_runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr, 397 retval = pm_create_runlist(pm, &rl_buffer[rl_wptr],
396 alloc_size_bytes / sizeof(uint32_t), true); 398 *rl_gpu_addr,
399 alloc_size_bytes / sizeof(uint32_t),
400 true);
397 401
398 for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++) 402 for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
399 pr_debug("0x%2X ", rl_buffer[i]); 403 pr_debug("0x%2X ", rl_buffer[i]);
400 pr_debug("\n"); 404 pr_debug("\n");
401 405
402 return 0; 406 return retval;
403} 407}
404 408
405int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) 409int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
@@ -512,7 +516,8 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
512 int retval; 516 int retval;
513 struct pm4_query_status *packet; 517 struct pm4_query_status *packet;
514 518
515 BUG_ON(!fence_address); 519 if (WARN_ON(!fence_address))
520 return -EFAULT;
516 521
517 mutex_lock(&pm->lock); 522 mutex_lock(&pm->lock);
518 retval = pm->priv_queue->ops.acquire_packet_buffer( 523 retval = pm->priv_queue->ops.acquire_packet_buffer(
@@ -577,8 +582,9 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
577 engine_sel__mes_unmap_queues__sdma0 + sdma_engine; 582 engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
578 break; 583 break;
579 default: 584 default:
580 BUG(); 585 WARN(1, "queue type %d", type);
581 break; 586 retval = -EINVAL;
587 goto err_invalid;
582 } 588 }
583 589
584 if (reset) 590 if (reset)
@@ -610,12 +616,18 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
610 queue_sel__mes_unmap_queues__perform_request_on_dynamic_queues_only; 616 queue_sel__mes_unmap_queues__perform_request_on_dynamic_queues_only;
611 break; 617 break;
612 default: 618 default:
613 BUG(); 619 WARN(1, "filter %d", mode);
614 break; 620 retval = -EINVAL;
621 goto err_invalid;
615 } 622 }
616 623
617 pm->priv_queue->ops.submit_packet(pm->priv_queue); 624 pm->priv_queue->ops.submit_packet(pm->priv_queue);
618 625
626 mutex_unlock(&pm->lock);
627 return 0;
628
629err_invalid:
630 pm->priv_queue->ops.rollback_packet(pm->priv_queue);
619err_acquire_packet_buffer: 631err_acquire_packet_buffer:
620 mutex_unlock(&pm->lock); 632 mutex_unlock(&pm->lock);
621 return retval; 633 return retval;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index b3f7d431b9a6..1e06de0bc673 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -92,6 +92,6 @@ unsigned int kfd_pasid_alloc(void)
92 92
93void kfd_pasid_free(unsigned int pasid) 93void kfd_pasid_free(unsigned int pasid)
94{ 94{
95 BUG_ON(pasid == 0 || pasid >= pasid_limit); 95 if (!WARN_ON(pasid == 0 || pasid >= pasid_limit))
96 clear_bit(pasid, pasid_bitmap); 96 clear_bit(pasid, pasid_bitmap);
97} 97}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index d030d76cef46..c74cf22a1ed9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -79,8 +79,6 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread)
79{ 79{
80 struct kfd_process *process; 80 struct kfd_process *process;
81 81
82 BUG_ON(!kfd_process_wq);
83
84 if (!thread->mm) 82 if (!thread->mm)
85 return ERR_PTR(-EINVAL); 83 return ERR_PTR(-EINVAL);
86 84
@@ -202,10 +200,8 @@ static void kfd_process_destroy_delayed(struct rcu_head *rcu)
202 struct kfd_process_release_work *work; 200 struct kfd_process_release_work *work;
203 struct kfd_process *p; 201 struct kfd_process *p;
204 202
205 BUG_ON(!kfd_process_wq);
206
207 p = container_of(rcu, struct kfd_process, rcu); 203 p = container_of(rcu, struct kfd_process, rcu);
208 BUG_ON(atomic_read(&p->mm->mm_count) <= 0); 204 WARN_ON(atomic_read(&p->mm->mm_count) <= 0);
209 205
210 mmdrop(p->mm); 206 mmdrop(p->mm);
211 207
@@ -229,7 +225,8 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
229 * mmu_notifier srcu is read locked 225 * mmu_notifier srcu is read locked
230 */ 226 */
231 p = container_of(mn, struct kfd_process, mmu_notifier); 227 p = container_of(mn, struct kfd_process, mmu_notifier);
232 BUG_ON(p->mm != mm); 228 if (WARN_ON(p->mm != mm))
229 return;
233 230
234 mutex_lock(&kfd_processes_mutex); 231 mutex_lock(&kfd_processes_mutex);
235 hash_del_rcu(&p->kfd_processes); 232 hash_del_rcu(&p->kfd_processes);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index f6ecdffbd415..1cae95e2b13a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -218,8 +218,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
218 kq, &pdd->qpd); 218 kq, &pdd->qpd);
219 break; 219 break;
220 default: 220 default:
221 BUG(); 221 WARN(1, "Invalid queue type %d", type);
222 break; 222 retval = -EINVAL;
223 } 223 }
224 224
225 if (retval != 0) { 225 if (retval != 0) {
@@ -272,7 +272,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
272 dev = pqn->kq->dev; 272 dev = pqn->kq->dev;
273 if (pqn->q) 273 if (pqn->q)
274 dev = pqn->q->device; 274 dev = pqn->q->device;
275 BUG_ON(!dev); 275 if (WARN_ON(!dev))
276 return -ENODEV;
276 277
277 pdd = kfd_get_process_device_data(dev, pqm->process); 278 pdd = kfd_get_process_device_data(dev, pqm->process);
278 if (!pdd) { 279 if (!pdd) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index e5486f494c47..19ce59028d6b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -799,10 +799,12 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
799 int ret; 799 int ret;
800 uint32_t i; 800 uint32_t i;
801 801
802 if (WARN_ON(dev->kobj_node))
803 return -EEXIST;
804
802 /* 805 /*
803 * Creating the sysfs folders 806 * Creating the sysfs folders
804 */ 807 */
805 BUG_ON(dev->kobj_node);
806 dev->kobj_node = kfd_alloc_struct(dev->kobj_node); 808 dev->kobj_node = kfd_alloc_struct(dev->kobj_node);
807 if (!dev->kobj_node) 809 if (!dev->kobj_node)
808 return -ENOMEM; 810 return -ENOMEM;