Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c    3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c     10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c    19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c     36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c       3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h       1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c      16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c      17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c     37
9 files changed, 115 insertions, 27 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 4376b17ca594..56f8ca2a3bb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -464,8 +464,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
         }
     }
     if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
-        if ((adev->flags & AMD_IS_PX) &&
-            amdgpu_atpx_dgpu_req_power_for_displays()) {
+        if (adev->flags & AMD_IS_PX) {
             pm_runtime_get_sync(adev->ddev->dev);
             /* Just fire off a uevent and let userspace tell us what to do */
             drm_helper_hpd_irq_event(adev->ddev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 95144e49c7f9..34471dbaa872 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -342,6 +342,16 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
     if (current_level == level)
         return count;
 
+    /* profile_exit setting is valid only when current mode is in profile mode */
+    if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+            AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+            AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+            AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
+            (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
+        pr_err("Currently not in any profile mode!\n");
+        return -EINVAL;
+    }
+
     if (is_support_sw_smu(adev)) {
         mutex_lock(&adev->pm.mutex);
         if (adev->pm.dpm.thermal_active) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 905cce1814f3..05897b05766b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -38,18 +38,10 @@ static void psp_set_funcs(struct amdgpu_device *adev);
 static int psp_early_init(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+    struct psp_context *psp = &adev->psp;
 
     psp_set_funcs(adev);
 
-    return 0;
-}
-
-static int psp_sw_init(void *handle)
-{
-    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-    struct psp_context *psp = &adev->psp;
-    int ret;
-
     switch (adev->asic_type) {
     case CHIP_VEGA10:
     case CHIP_VEGA12:
@@ -67,6 +59,15 @@ static int psp_sw_init(void *handle)
 
     psp->adev = adev;
 
+    return 0;
+}
+
+static int psp_sw_init(void *handle)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+    struct psp_context *psp = &adev->psp;
+    int ret;
+
     ret = psp_init_microcode(psp);
     if (ret) {
         DRM_ERROR("Failed to load psp firmware!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a07c85815b7a..4f10f5aba00b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2757,6 +2757,37 @@ error_free_sched_entity:
 }
 
 /**
+ * amdgpu_vm_check_clean_reserved - check if a VM is clean
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: the VM to check
+ *
+ * Check all entries of the root PD. If any subsequent PDs are allocated,
+ * page tables are being created and filled, so this is not a clean VM.
+ *
+ * Returns:
+ * 0 if this VM is clean
+ */
+static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev,
+                                          struct amdgpu_vm *vm)
+{
+    enum amdgpu_vm_level root = adev->vm_manager.root_level;
+    unsigned int entries = amdgpu_vm_num_entries(adev, root);
+    unsigned int i = 0;
+
+    if (!(vm->root.entries))
+        return 0;
+
+    for (i = 0; i < entries; i++) {
+        if (vm->root.entries[i].base.bo)
+            return -EINVAL;
+    }
+
+    return 0;
+}
+
+/**
  * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
  *
  * @adev: amdgpu_device pointer
@@ -2786,10 +2817,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
         return r;
 
     /* Sanity checks */
-    if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
-        r = -EINVAL;
+    r = amdgpu_vm_check_clean_reserved(adev, vm);
+    if (r)
         goto unreserve_bo;
-    }
 
     if (pasid) {
         unsigned long flags;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 8dbad496b29f..2471e7cf75ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -372,6 +372,9 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
         if (amdgpu_sriov_runtime(adev))
             schedule_work(&adev->virt.flr_work);
         break;
+    case IDH_QUERY_ALIVE:
+        xgpu_ai_mailbox_send_ack(adev);
+        break;
     /* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore
      * it byfar since that polling thread will handle it,
      * other msg like flr complete is not handled here.
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 39d151b79153..077e91a33d62 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -49,6 +49,7 @@ enum idh_event {
     IDH_FLR_NOTIFICATION_CMPL,
     IDH_SUCCESS,
     IDH_FAIL,
+    IDH_QUERY_ALIVE,
     IDH_EVENT_MAX
 };
54 55
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index dc461df48da0..2191d3d0a219 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -787,10 +787,13 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
             0xFFFFFFFF, 0x00000004);
         /* mc resume*/
         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-            MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
-                lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
-            MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
-                upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
+            MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
+                        mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+                        adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo);
+            MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i,
+                        mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+                        adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi);
+            MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0);
             offset = 0;
         } else {
             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
@@ -798,10 +801,11 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
             MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
                 upper_32_bits(adev->uvd.inst[i].gpu_addr));
             offset = size;
+            MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+                        AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+
         }
 
-        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
-                    AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
         MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size);
 
         MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index f3f5938430d4..c0ec27991c22 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -244,13 +244,18 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
     MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
     MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);
 
+    offset = AMDGPU_VCE_FIRMWARE_OFFSET;
     if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+        uint32_t low = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_lo;
+        uint32_t hi = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_hi;
+        uint64_t tmr_mc_addr = (uint64_t)(hi) << 32 | low;
+
         MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
-            mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
-            adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 8);
+            mmVCE_LMI_VCPU_CACHE_40BIT_BAR0), tmr_mc_addr >> 8);
         MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
             mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
-            (adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].mc_addr >> 40) & 0xff);
+            (tmr_mc_addr >> 40) & 0xff);
+        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), 0);
     } else {
         MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
             mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
@@ -258,6 +263,9 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
         MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
             mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
             (adev->vce.gpu_addr >> 40) & 0xff);
+        MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
+                    offset & ~0x0f000000);
+
     }
     MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
         mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
@@ -272,10 +280,7 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
         mmVCE_LMI_VCPU_CACHE_64BIT_BAR2),
         (adev->vce.gpu_addr >> 40) & 0xff);
 
-    offset = AMDGPU_VCE_FIRMWARE_OFFSET;
     size = VCE_V4_0_FW_SIZE;
-    MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
-            offset & ~0x0f000000);
     MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);
 
     offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 1b2f69a9a24e..8d89ab7f0ae8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -31,7 +31,7 @@
 #include "soc15_common.h"
 #include "vega10_ih.h"
 
-
+#define MAX_REARM_RETRY 10
 
 static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
 
@@ -382,6 +382,38 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
 }
 
 /**
+ * vega10_ih_irq_rearm - rearm IRQ if lost
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+static void vega10_ih_irq_rearm(struct amdgpu_device *adev,
+                               struct amdgpu_ih_ring *ih)
+{
+    uint32_t reg_rptr = 0;
+    uint32_t v = 0;
+    uint32_t i = 0;
+
+    if (ih == &adev->irq.ih)
+        reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+    else if (ih == &adev->irq.ih1)
+        reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+    else if (ih == &adev->irq.ih2)
+        reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+    else
+        return;
+
+    /* Rearm IRQ / re-write doorbell if doorbell write is lost */
+    for (i = 0; i < MAX_REARM_RETRY; i++) {
+        v = RREG32_NO_KIQ(reg_rptr);
+        if ((v < ih->ring_size) && (v != ih->rptr))
+            WDOORBELL32(ih->doorbell_index, ih->rptr);
+        else
+            break;
+    }
+}
+
+/**
  * vega10_ih_set_rptr - set the IH ring buffer rptr
  *
  * @adev: amdgpu_device pointer
@@ -395,6 +427,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
         /* XXX check if swapping is necessary on BE */
         *ih->rptr_cpu = ih->rptr;
         WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+        if (amdgpu_sriov_vf(adev))
+            vega10_ih_irq_rearm(adev, ih);
     } else if (ih == &adev->irq.ih) {
         WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
     } else if (ih == &adev->irq.ih1) {