Diffstat (limited to 'drivers/misc/cxl/native.c')
 drivers/misc/cxl/native.c | 207
 1 file changed, 165 insertions(+), 42 deletions(-)
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 55d8a1459f28..3bcdaee11ba1 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -21,10 +21,10 @@
 #include "cxl.h"
 #include "trace.h"
 
-static int afu_control(struct cxl_afu *afu, u64 command,
+static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
                        u64 result, u64 mask, bool enabled)
 {
-        u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
+        u64 AFU_Cntl;
         unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
         int rc = 0;
 
@@ -33,7 +33,8 @@ static int afu_control(struct cxl_afu *afu, u64 command,
 
         trace_cxl_afu_ctrl(afu, command);
 
-        cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl | command);
+        AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
+        cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);
 
         AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
         while ((AFU_Cntl & mask) != result) {
@@ -54,6 +55,16 @@ static int afu_control(struct cxl_afu *afu, u64 command,
                 cpu_relax();
                 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
         };
+
+        if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
+                /*
+                 * Workaround for a bug in the XSL used in the Mellanox CX4
+                 * that fails to clear the RA bit after an AFU reset,
+                 * preventing subsequent AFU resets from working.
+                 */
+                cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
+        }
+
         pr_devel("AFU command complete: %llx\n", command);
         afu->enabled = enabled;
 out:
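
With the new clear argument, afu_control becomes a general read-modify-write on CXL_AFU_Cntl_An. Stripped of the timeout, link-ok and trace handling, the sequence it now implements reduces to the following sketch (simplified; not the full function):

        u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);  /* current state */

        /* drop the bits the caller wants cleared, set the command bits */
        cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);

        /* poll until the AFU reports the expected state; the real code
         * also honours a jiffies-based CXL_TIMEOUT deadline */
        while ((cxl_p2n_read(afu, CXL_AFU_Cntl_An) & mask) != result)
                cpu_relax();
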
@@ -67,7 +78,7 @@ static int afu_enable(struct cxl_afu *afu)
 {
         pr_devel("AFU enable request\n");
 
-        return afu_control(afu, CXL_AFU_Cntl_An_E,
+        return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
                            CXL_AFU_Cntl_An_ES_Enabled,
                            CXL_AFU_Cntl_An_ES_MASK, true);
 }
@@ -76,7 +87,8 @@ int cxl_afu_disable(struct cxl_afu *afu)
 {
         pr_devel("AFU disable request\n");
 
-        return afu_control(afu, 0, CXL_AFU_Cntl_An_ES_Disabled,
+        return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
+                           CXL_AFU_Cntl_An_ES_Disabled,
                            CXL_AFU_Cntl_An_ES_MASK, false);
 }
 
82 94
@@ -85,7 +97,7 @@ static int native_afu_reset(struct cxl_afu *afu)
85{ 97{
86 pr_devel("AFU reset request\n"); 98 pr_devel("AFU reset request\n");
87 99
88 return afu_control(afu, CXL_AFU_Cntl_An_RA, 100 return afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
89 CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled, 101 CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
90 CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK, 102 CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
91 false); 103 false);
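
Taken together, the three callers now pass these (command, clear) pairs, which makes the purpose of the new argument concrete:

        /*
         *  caller              command              clear
         *  ------------------  -------------------  -----------------
         *  afu_enable()        CXL_AFU_Cntl_An_E    0
         *  cxl_afu_disable()   0                    CXL_AFU_Cntl_An_E
         *  native_afu_reset()  CXL_AFU_Cntl_An_RA   0
         */

A disable therefore writes (AFU_Cntl & ~CXL_AFU_Cntl_An_E), explicitly dropping a stale enable bit rather than leaving it set while waiting for the disabled state.
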
@@ -189,7 +201,7 @@ int cxl_alloc_spa(struct cxl_afu *afu)
         unsigned spa_size;
 
         /* Work out how many pages to allocate */
-        afu->native->spa_order = 0;
+        afu->native->spa_order = -1;
         do {
                 afu->native->spa_order++;
                 spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;
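
This fixes an off-by-one in the SPA sizing loop: with spa_order starting at 0 and the increment at the top of the do-while, the smallest SPA ever considered was order 1 (two pages); starting at -1 lets the first pass test order 0. A standalone sketch of the corrected loop (the termination condition is outside this hunk, so required_size below is illustrative only):

        int spa_order = -1;
        unsigned spa_size;

        do {
                spa_order++;                             /* 0, 1, 2, ...       */
                spa_size = (1 << spa_order) * PAGE_SIZE; /* 1, 2, 4, ... pages */
        } while (spa_size < required_size);              /* illustrative only  */
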
@@ -430,7 +442,6 @@ static int remove_process_element(struct cxl_context *ctx)
         return rc;
 }
 
-
 void cxl_assign_psn_space(struct cxl_context *ctx)
 {
         if (!ctx->afu->pp_size || ctx->master) {
@@ -507,10 +518,39 @@ static u64 calculate_sr(struct cxl_context *ctx)
         return sr;
 }
 
+static void update_ivtes_directed(struct cxl_context *ctx)
+{
+        bool need_update = (ctx->status == STARTED);
+        int r;
+
+        if (need_update) {
+                WARN_ON(terminate_process_element(ctx));
+                WARN_ON(remove_process_element(ctx));
+        }
+
+        for (r = 0; r < CXL_IRQ_RANGES; r++) {
+                ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
+                ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
+        }
+
+        /*
+         * Theoretically we could use the update llcmd, instead of a
+         * terminate/remove/add (or if an atomic update was required we could
+         * do a suspend/update/resume), however it seems there might be issues
+         * with the update llcmd on some cards (including those using an XSL on
+         * an ASIC) so for now it's safest to go with the commands that are
+         * known to work. In the future if we come across a situation where the
+         * card may be performing transactions using the same PE while we are
+         * doing this update we might need to revisit this.
+         */
+        if (need_update)
+                WARN_ON(add_process_element(ctx));
+}
+
 static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
 {
         u32 pid;
-        int r, result;
+        int result;
 
         cxl_assign_psn_space(ctx);
 
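
One detail worth noting in update_ivtes_directed: the process element lives in the scheduled process area, which the PSL reads as a big-endian structure, hence cpu_to_be16() on every IVTE field. With a hypothetical offset value of 0x0123 on a little-endian host:

        /* hypothetical value, for illustration only */
        ctx->elem->ivte_offsets[0] = cpu_to_be16(0x0123);
        /* stores the bytes 0x01 0x23 in that order, independent of host
         * endianness, which is the layout the PSL expects */
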
@@ -545,10 +585,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
                 ctx->irqs.range[0] = 1;
         }
 
-        for (r = 0; r < CXL_IRQ_RANGES; r++) {
-                ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
-                ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
-        }
+        update_ivtes_directed(ctx);
 
         ctx->elem->common.amr = cpu_to_be64(amr);
         ctx->elem->common.wed = cpu_to_be64(wed);
@@ -570,7 +607,33 @@ static int deactivate_afu_directed(struct cxl_afu *afu)
         cxl_sysfs_afu_m_remove(afu);
         cxl_chardev_afu_remove(afu);
 
-        cxl_ops->afu_reset(afu);
+        /*
+         * The CAIA section 2.2.1 indicates that the procedure for starting and
+         * stopping an AFU in AFU directed mode is AFU specific, which is not
+         * ideal since this code is generic and with one exception has no
+         * knowledge of the AFU. This is in contrast to the procedure for
+         * disabling a dedicated process AFU, which is documented to just
+         * require a reset. The architecture does indicate that both an AFU
+         * reset and an AFU disable should result in the AFU being disabled and
+         * we do both followed by a PSL purge for safety.
+         *
+         * Notably we used to have some issues with the disable sequence on PSL
+         * cards, which is why we ended up using this heavy weight procedure in
+         * the first place, however a bug was discovered that had rendered the
+         * disable operation ineffective, so it is conceivable that was the
+         * sole explanation for those difficulties. Careful regression testing
+         * is recommended if anyone attempts to remove or reorder these
+         * operations.
+         *
+         * The XSL on the Mellanox CX4 behaves a little differently from the
+         * PSL based cards and will time out an AFU reset if the AFU is still
+         * enabled. That card is special in that we do have a means to identify
+         * it from this code, so in that case we skip the reset and just use a
+         * disable/purge to avoid the timeout and corresponding noise in the
+         * kernel log.
+         */
+        if (afu->adapter->native->sl_ops->needs_reset_before_disable)
+                cxl_ops->afu_reset(afu);
         cxl_afu_disable(afu);
         cxl_psl_purge(afu);
 
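
needs_reset_before_disable is not defined in this file; from its use above it is presumably a simple flag in the service layer ops (a sketch; the authoritative definition lives in cxl.h):

        struct cxl_service_layer_ops {
                /* ... other hooks elided ... */
                bool needs_reset_before_disable; /* true for PSL cards; left
                                                    false for the CX4 XSL so
                                                    the reset is skipped */
        };
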
@@ -600,6 +663,22 @@ static int activate_dedicated_process(struct cxl_afu *afu)
         return cxl_chardev_d_afu_add(afu);
 }
 
+static void update_ivtes_dedicated(struct cxl_context *ctx)
+{
+        struct cxl_afu *afu = ctx->afu;
+
+        cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
+                      (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
+                      (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
+                      (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
+                      ((u64)ctx->irqs.offset[3] & 0xffff));
+        cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
+                      (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
+                      (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
+                      (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
+                      ((u64)ctx->irqs.range[3] & 0xffff));
+}
+
 static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
 {
         struct cxl_afu *afu = ctx->afu;
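
update_ivtes_dedicated packs the four 16-bit offsets (and, analogously, the four ranges) into one 64-bit MMIO value, with range r landing at bit 48 - 16*r. For example, offsets {0x10, 0x20, 0x30, 0x40} produce 0x0010002000300040. A generic helper doing the same packing might look like this (hypothetical, shown only to make the shifts readable):

        static u64 pack_ivte(const u16 v[4])
        {
                return ((u64)v[0] << 48) |
                       ((u64)v[1] << 32) |
                       ((u64)v[2] << 16) |
                        (u64)v[3];
        }
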
@@ -618,16 +697,7 @@ static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
 
         cxl_prefault(ctx, wed);
 
-        cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
-                      (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
-                      (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
-                      (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
-                      ((u64)ctx->irqs.offset[3] & 0xffff));
-        cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
-                      (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
-                      (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
-                      (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
-                      ((u64)ctx->irqs.range[3] & 0xffff));
+        update_ivtes_dedicated(ctx);
 
         cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);
 
@@ -703,12 +773,37 @@ static int native_attach_process(struct cxl_context *ctx, bool kernel,
 
 static inline int detach_process_native_dedicated(struct cxl_context *ctx)
 {
+        /*
+         * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
+         * stop the AFU in dedicated mode (we therefore do not make that
+         * optional like we do in the afu directed path). It does not indicate
+         * that we need to do an explicit disable (which should occur
+         * implicitly as part of the reset) or purge, but we do these as well
+         * to be on the safe side.
+         *
+         * Notably we used to have some issues with the disable sequence
+         * (before the sequence was spelled out in the architecture) which is
+         * why we were so heavy weight in the first place, however a bug was
+         * discovered that had rendered the disable operation ineffective, so
+         * it is conceivable that was the sole explanation for those
+         * difficulties. Point is, we should be careful and do some regression
+         * testing if we ever attempt to remove any part of this procedure.
+         */
         cxl_ops->afu_reset(ctx->afu);
         cxl_afu_disable(ctx->afu);
         cxl_psl_purge(ctx->afu);
         return 0;
 }
 
+static void native_update_ivtes(struct cxl_context *ctx)
+{
+        if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
+                return update_ivtes_directed(ctx);
+        if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
+                return update_ivtes_dedicated(ctx);
+        WARN(1, "native_update_ivtes: Bad mode\n");
+}
+
 static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
 {
         if (!ctx->pe_inserted)
@@ -754,26 +849,38 @@ static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
         return 0;
 }
 
-static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
-                                                 u64 dsisr, u64 errstat)
+void cxl_native_psl_irq_dump_regs(struct cxl_context *ctx)
 {
         u64 fir1, fir2, fir_slice, serr, afu_debug;
 
         fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
         fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
         fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
-        serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
         afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
 
-        dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
         dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
         dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
-        dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+        if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
+                serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
+                cxl_afu_decode_psl_serr(ctx->afu, serr);
+        }
         dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
         dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
+}
+
+static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
+                                                 u64 dsisr, u64 errstat)
+{
+
+        dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
 
-        dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
-        cxl_stop_trace(ctx->afu->adapter);
+        if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
+                ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);
+
+        if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
+                dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
+                ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
+        }
 
         return cxl_ops->ack_irq(ctx, 0, errstat);
 }
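
The slice error path now dispatches through optional sl_ops hooks so an XSL, which lacks the PSL error registers, can simply leave them NULL. From the calls in this hunk the hook signatures are presumably (a sketch; see cxl.h for the real definition):

        struct cxl_service_layer_ops {
                /* ... */
                int (*register_serr_irq)(struct cxl_afu *afu);
                void (*psl_irq_dump_registers)(struct cxl_context *ctx);
                void (*err_irq_dump_registers)(struct cxl *adapter);
                void (*debugfs_stop_trace)(struct cxl *adapter);
                /* ... */
        };
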
@@ -849,41 +956,56 @@ void native_irq_wait(struct cxl_context *ctx)
 static irqreturn_t native_slice_irq_err(int irq, void *data)
 {
         struct cxl_afu *afu = data;
-        u64 fir_slice, errstat, serr, afu_debug;
-
-        WARN(irq, "CXL SLICE ERROR interrupt %i\n", irq);
+        u64 fir_slice, errstat, serr, afu_debug, afu_error, dsisr;
 
+        /*
+         * slice err interrupt is only used with full PSL (no XSL)
+         */
         serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
         fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
         errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
         afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
-        dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+        afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
+        dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
+        cxl_afu_decode_psl_serr(afu, serr);
         dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
         dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
         dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
+        dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
+        dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);
 
         cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
 
         return IRQ_HANDLED;
 }
 
+void cxl_native_err_irq_dump_regs(struct cxl *adapter)
+{
+        u64 fir1, fir2;
+
+        fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
+        fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
+
+        dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
+}
+
 static irqreturn_t native_irq_err(int irq, void *data)
 {
         struct cxl *adapter = data;
-        u64 fir1, fir2, err_ivte;
+        u64 err_ivte;
 
         WARN(1, "CXL ERROR interrupt %i\n", irq);
 
         err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
         dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);
 
-        dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
-        cxl_stop_trace(adapter);
-
-        fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
-        fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
+        if (adapter->native->sl_ops->debugfs_stop_trace) {
+                dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
+                adapter->native->sl_ops->debugfs_stop_trace(adapter);
+        }
 
-        dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
+        if (adapter->native->sl_ops->err_irq_dump_registers)
+                adapter->native->sl_ops->err_irq_dump_registers(adapter);
 
         return IRQ_HANDLED;
 }
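
For a full PSL card, the two dump helpers defined above would be wired into the service layer ops, along with the existing trace-stop routine; a sketch of the registration this implies (assumed to live in pci.c):

        static const struct cxl_service_layer_ops psl_ops = {
                /* ... */
                .psl_irq_dump_registers = cxl_native_psl_irq_dump_regs,
                .err_irq_dump_registers = cxl_native_err_irq_dump_regs,
                .debugfs_stop_trace     = cxl_stop_trace,
                .needs_reset_before_disable = true,
                /* ... */
        };

An XSL-based card would leave the PSL-specific pointers unset, so every check in the error paths above degrades gracefully to a no-op.
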
@@ -1128,6 +1250,7 @@ const struct cxl_backend_ops cxl_native_ops = {
         .irq_wait = native_irq_wait,
         .attach_process = native_attach_process,
         .detach_process = native_detach_process,
+        .update_ivtes = native_update_ivtes,
         .support_attributes = native_support_attributes,
         .link_ok = cxl_adapter_link_ok,
         .release_afu = cxl_pci_release_afu,
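
The new update_ivtes backend op gives core code a mode-agnostic way to re-publish a context's IVTEs after its interrupt allocation changes. A hypothetical caller (the function name is illustrative; afu_register_irqs is assumed to be the existing allocation helper):

        static int cxl_resize_context_irqs(struct cxl_context *ctx, u32 count)
        {
                int rc;

                rc = afu_register_irqs(ctx, count); /* assumed helper */
                if (rc)
                        return rc;

                /* dispatches to update_ivtes_directed()/_dedicated() */
                cxl_ops->update_ivtes(ctx);
                return 0;
        }
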