Diffstat (limited to 'drivers/misc/cxl/pci.c')
-rw-r--r--  drivers/misc/cxl/pci.c  484
1 file changed, 441 insertions(+), 43 deletions(-)

diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index a08fcc888a71..d152e2de8c93 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -55,6 +55,8 @@
 	pci_read_config_byte(dev, vsec + 0xa, dest)
 #define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
 	pci_write_config_byte(dev, vsec + 0xa, val)
+#define CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, vsec, val) \
+	pci_bus_write_config_byte(bus, devfn, vsec + 0xa, val)
 #define CXL_VSEC_PROTOCOL_MASK 0xe0
 #define CXL_VSEC_PROTOCOL_1024TB 0x80
 #define CXL_VSEC_PROTOCOL_512TB 0x40
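The mode-control byte at vsec + 0xa carries the protocol field in its top bits, and the new _BUS variant exists so the mode-switch worker added later in this patch can still write it after the struct pci_dev has been dropped. A standalone sketch of the read-modify-write these macros are used for, with config space mocked by a plain byte; the mask and the 1024TB/512TB values come from this hunk, everything else is illustrative:

/*
 * Standalone sketch of the VSEC mode-control read-modify-write done
 * through the macros above. Config-space access is mocked with a
 * plain byte; only CXL_VSEC_PROTOCOL_MASK, _1024TB and _512TB are
 * taken from the patch.
 */
#include <stdio.h>
#include <stdint.h>

#define CXL_VSEC_PROTOCOL_MASK   0xe0
#define CXL_VSEC_PROTOCOL_1024TB 0x80
#define CXL_VSEC_PROTOCOL_512TB  0x40

int main(void)
{
	uint8_t val = 0x8f;	/* pretend read: 1024TB + low control bits */

	val &= ~CXL_VSEC_PROTOCOL_MASK;	/* clear the protocol field */
	val |= CXL_VSEC_PROTOCOL_512TB;	/* select a new protocol */
	printf("mode control byte: %#04x\n", val);	/* prints 0x4f */
	return 0;
}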
@@ -352,13 +354,10 @@ static u64 get_capp_unit_id(struct device_node *np)
 	return 0;
 }
 
-static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
+static int calc_capp_routing(struct pci_dev *dev, u64 *chipid, u64 *capp_unit_id)
 {
 	struct device_node *np;
 	const __be32 *prop;
-	u64 psl_dsnctl;
-	u64 chipid;
-	u64 capp_unit_id;
 
 	if (!(np = pnv_pci_get_phb_node(dev)))
 		return -ENODEV;
@@ -367,14 +366,28 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev
 		np = of_get_next_parent(np);
 	if (!np)
 		return -ENODEV;
-	chipid = be32_to_cpup(prop);
-	capp_unit_id = get_capp_unit_id(np);
+	*chipid = be32_to_cpup(prop);
+	*capp_unit_id = get_capp_unit_id(np);
 	of_node_put(np);
-	if (!capp_unit_id) {
+	if (!*capp_unit_id) {
 		pr_err("cxl: invalid capp unit id\n");
 		return -ENODEV;
 	}
 
+	return 0;
+}
+
+static int init_implementation_adapter_psl_regs(struct cxl *adapter, struct pci_dev *dev)
+{
+	u64 psl_dsnctl;
+	u64 chipid;
+	u64 capp_unit_id;
+	int rc;
+
+	rc = calc_capp_routing(dev, &chipid, &capp_unit_id);
+	if (rc)
+		return rc;
+
 	psl_dsnctl = 0x0000900000000000ULL; /* pteupd ttype, scdone */
 	psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang pulse: 256 us */
 	/* Tell PSL where to route data to */
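The (63-n) shifts in these register writes follow Power's MSB-0 bit numbering: "bit n" of a 64-bit register sits 63-n positions above the least significant bit, so in the XSL variant added in the next hunk the chipid field starts at MSB-0 bit 5 and the capp_unit_id at bit 13. A runnable sketch with made-up id values:

/*
 * Sketch of the routing-field packing used here. Power register
 * layouts number bits MSB-first, so "bit n" becomes an LSB-0 shift
 * of (63-n). The chipid and capp_unit_id values are made up.
 */
#include <stdio.h>
#include <stdint.h>

#define PPC_BIT_SHIFT(n) (63 - (n))	/* MSB-0 bit n -> LSB-0 shift */

int main(void)
{
	uint64_t chipid = 0x1;		/* example chip id */
	uint64_t capp_unit_id = 0x2;	/* example CAPP unit id */
	uint64_t xsl_dsnctl;

	xsl_dsnctl = 0x0000600000000000ULL | (chipid << PPC_BIT_SHIFT(5));
	xsl_dsnctl |= (capp_unit_id << PPC_BIT_SHIFT(13));
	printf("XSL_DSNCTL = %#018llx\n", (unsigned long long)xsl_dsnctl);
	return 0;	/* prints 0x0408600000000000 */
}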
@@ -393,8 +406,61 @@ static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev
 	return 0;
 }
 
+static int init_implementation_adapter_xsl_regs(struct cxl *adapter, struct pci_dev *dev)
+{
+	u64 xsl_dsnctl;
+	u64 chipid;
+	u64 capp_unit_id;
+	int rc;
+
+	rc = calc_capp_routing(dev, &chipid, &capp_unit_id);
+	if (rc)
+		return rc;
+
+	/* Tell XSL where to route data to */
+	xsl_dsnctl = 0x0000600000000000ULL | (chipid << (63-5));
+	xsl_dsnctl |= (capp_unit_id << (63-13));
+	cxl_p1_write(adapter, CXL_XSL_DSNCTL, xsl_dsnctl);
+
+	return 0;
+}
+
+/* PSL & XSL */
+#define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3))
 #define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
-#define _2048_250MHZ_CYCLES 1
+/* For the PSL this is a multiple for 0 < n <= 7: */
+#define PSL_2048_250MHZ_CYCLES 1
+
+static void write_timebase_ctrl_psl(struct cxl *adapter)
+{
+	cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
+		     TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES));
+}
+
+/* XSL */
+#define TBSYNC_ENA (1ULL << 63)
+/* For the XSL this is 2**n * 2000 clocks for 0 < n <= 6: */
+#define XSL_2000_CLOCKS 1
+#define XSL_4000_CLOCKS 2
+#define XSL_8000_CLOCKS 3
+
+static void write_timebase_ctrl_xsl(struct cxl *adapter)
+{
+	cxl_p1_write(adapter, CXL_XSL_TB_CTLSTAT,
+		     TBSYNC_ENA |
+		     TBSYNC_CAL(3) |
+		     TBSYNC_CNT(XSL_4000_CLOCKS));
+}
+
+static u64 timebase_read_psl(struct cxl *adapter)
+{
+	return cxl_p1_read(adapter, CXL_PSL_Timebase);
+}
+
+static u64 timebase_read_xsl(struct cxl *adapter)
+{
+	return cxl_p1_read(adapter, CXL_XSL_Timebase);
+}
 
 static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
 {
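write_timebase_ctrl_psl() and write_timebase_ctrl_xsl() differ only in which fields of the timebase control register they encode. A standalone sketch that evaluates both raw values so they can be checked by eye; the macros are copied from the hunk above, the printout is the only addition:

/*
 * Evaluate the two timebase-control encodings defined in this patch.
 */
#include <stdio.h>
#include <stdint.h>

#define TBSYNC_ENA	(1ULL << 63)
#define TBSYNC_CAL(n)	(((uint64_t)(n) & 0x7) << (63 - 3))
#define TBSYNC_CNT(n)	(((uint64_t)(n) & 0x7) << (63 - 6))
#define PSL_2048_250MHZ_CYCLES	1
#define XSL_4000_CLOCKS		2

int main(void)
{
	/* PSL: sync count of 2 * 2048 cycles at 250MHz */
	printf("PSL TB_CTLSTAT = %#018llx\n",
	       (unsigned long long)TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES));
	/* XSL: enable bit, calibration field, 2**2 * 2000 clock count */
	printf("XSL TB_CTLSTAT = %#018llx\n",
	       (unsigned long long)(TBSYNC_ENA | TBSYNC_CAL(3) |
				    TBSYNC_CNT(XSL_4000_CLOCKS)));
	return 0;	/* prints 0x0400000000000000 and 0xb400000000000000 */
}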
@@ -421,8 +487,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
 	 * Setup PSL Timebase Control and Status register
 	 * with the recommended Timebase Sync Count value
 	 */
-	cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
-		     TBSYNC_CNT(2 * _2048_250MHZ_CYCLES));
+	adapter->native->sl_ops->write_timebase_ctrl(adapter);
 
 	/* Enable PSL Timebase */
 	cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000);
@@ -435,7 +500,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
 		dev_info(&dev->dev, "PSL timebase can't synchronize\n");
 		return;
 	}
-	psl_tb = cxl_p1_read(adapter, CXL_PSL_Timebase);
+	psl_tb = adapter->native->sl_ops->timebase_read(adapter);
 	delta = mftb() - psl_tb;
 	if (delta < 0)
 		delta = -delta;
@@ -445,7 +510,7 @@ static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
 	return;
 }
 
-static int init_implementation_afu_regs(struct cxl_afu *afu)
+static int init_implementation_afu_psl_regs(struct cxl_afu *afu)
 {
 	/* read/write masks for this slice */
 	cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
@@ -551,36 +616,234 @@ static int setup_cxl_bars(struct pci_dev *dev)
 	return 0;
 }
 
-/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */
-static int switch_card_to_cxl(struct pci_dev *dev)
-{
+#ifdef CONFIG_CXL_BIMODAL
+
+struct cxl_switch_work {
+	struct pci_dev *dev;
+	struct work_struct work;
 	int vsec;
+	int mode;
+};
+
+static void switch_card_to_cxl(struct work_struct *work)
+{
+	struct cxl_switch_work *switch_work =
+		container_of(work, struct cxl_switch_work, work);
+	struct pci_dev *dev = switch_work->dev;
+	struct pci_bus *bus = dev->bus;
+	struct pci_controller *hose = pci_bus_to_host(bus);
+	struct pci_dev *bridge;
+	struct pnv_php_slot *php_slot;
+	unsigned int devfn;
 	u8 val;
 	int rc;
 
-	dev_info(&dev->dev, "switch card to CXL\n");
+	dev_info(&bus->dev, "cxl: Preparing for mode switch...\n");
+	bridge = list_first_entry_or_null(&hose->bus->devices, struct pci_dev,
+					  bus_list);
+	if (!bridge) {
+		dev_WARN(&bus->dev, "cxl: Couldn't find root port!\n");
+		goto err_dev_put;
+	}
 
-	if (!(vsec = find_cxl_vsec(dev))) {
-		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
+	php_slot = pnv_php_find_slot(pci_device_to_OF_node(bridge));
+	if (!php_slot) {
+		dev_err(&bus->dev, "cxl: Failed to find slot hotplug "
+			"information. You may need to upgrade "
+			"skiboot. Aborting.\n");
+		goto err_dev_put;
+	}
+
+	rc = CXL_READ_VSEC_MODE_CONTROL(dev, switch_work->vsec, &val);
+	if (rc) {
+		dev_err(&bus->dev, "cxl: Failed to read CAPI mode control: %i\n", rc);
+		goto err_dev_put;
+	}
+	devfn = dev->devfn;
+
+	/* Release the reference obtained in cxl_check_and_switch_mode() */
+	pci_dev_put(dev);
+
+	dev_dbg(&bus->dev, "cxl: Removing PCI devices from kernel\n");
+	pci_lock_rescan_remove();
+	pci_hp_remove_devices(bridge->subordinate);
+	pci_unlock_rescan_remove();
+
+	/* Switch the CXL protocol on the card */
+	if (switch_work->mode == CXL_BIMODE_CXL) {
+		dev_info(&bus->dev, "cxl: Switching card to CXL mode\n");
+		val &= ~CXL_VSEC_PROTOCOL_MASK;
+		val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
+		rc = pnv_cxl_enable_phb_kernel_api(hose, true);
+		if (rc) {
+			dev_err(&bus->dev, "cxl: Failed to enable kernel API"
+				" on real PHB, aborting\n");
+			goto err_free_work;
+		}
+	} else {
+		dev_WARN(&bus->dev, "cxl: Switching card to PCI mode not supported!\n");
+		goto err_free_work;
+	}
+
+	rc = CXL_WRITE_VSEC_MODE_CONTROL_BUS(bus, devfn, switch_work->vsec, val);
+	if (rc) {
+		dev_err(&bus->dev, "cxl: Failed to configure CXL protocol: %i\n", rc);
+		goto err_free_work;
+	}
+
+	/*
+	 * The CAIA spec (v1.1, Section 10.6 Bi-modal Device Support) states
+	 * we must wait 100ms after this mode switch before touching PCIe config
+	 * space.
+	 */
+	msleep(100);
+
+	/*
+	 * Hot reset to cause the card to come back in cxl mode. An
+	 * OPAL_RESET_PCI_LINK would be sufficient, but currently lacks support
+	 * in skiboot, so we use a hot reset instead.
+	 *
+	 * We call pci_set_pcie_reset_state() on the bridge, as a CAPI card is
+	 * guaranteed to sit directly under the root port, and setting the reset
+	 * state on a device directly under the root port is equivalent to doing
+	 * it on the root port itself.
+	 */
+	dev_info(&bus->dev, "cxl: Configuration write complete, resetting card\n");
+	pci_set_pcie_reset_state(bridge, pcie_hot_reset);
+	pci_set_pcie_reset_state(bridge, pcie_deassert_reset);
+
+	dev_dbg(&bus->dev, "cxl: Offlining slot\n");
+	rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_OFFLINE);
+	if (rc) {
+		dev_err(&bus->dev, "cxl: OPAL offlining call failed: %i\n", rc);
+		goto err_free_work;
+	}
+
+	dev_dbg(&bus->dev, "cxl: Onlining and probing slot\n");
+	rc = pnv_php_set_slot_power_state(&php_slot->slot, OPAL_PCI_SLOT_ONLINE);
+	if (rc) {
+		dev_err(&bus->dev, "cxl: OPAL onlining call failed: %i\n", rc);
+		goto err_free_work;
+	}
+
+	pci_lock_rescan_remove();
+	pci_hp_add_devices(bridge->subordinate);
+	pci_unlock_rescan_remove();
+
+	dev_info(&bus->dev, "cxl: CAPI mode switch completed\n");
+	kfree(switch_work);
+	return;
+
+err_dev_put:
+	/* Release the reference obtained in cxl_check_and_switch_mode() */
+	pci_dev_put(dev);
+err_free_work:
+	kfree(switch_work);
+}
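The worker recovers its cxl_switch_work from the embedded work_struct with container_of(), which is just pointer arithmetic on the member's offset inside the wrapper. A userspace illustration with stand-in types, not the kernel's:

/*
 * Userspace illustration of the container_of() step at the top of
 * switch_card_to_cxl(): the workqueue hands back only the embedded
 * member, and the wrapper is recovered by subtracting its offset.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };	/* stand-in */

struct cxl_switch_work {
	void *dev;
	struct work_struct work;
	int vsec;
	int mode;
};

static void worker(struct work_struct *work)
{
	struct cxl_switch_work *sw =
		container_of(work, struct cxl_switch_work, work);

	printf("recovered work item: vsec=%d mode=%d\n", sw->vsec, sw->mode);
}

int main(void)
{
	struct cxl_switch_work item = { .vsec = 0x400, .mode = 1 };

	worker(&item.work);	/* only the embedded member is passed */
	return 0;
}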
+
+int cxl_check_and_switch_mode(struct pci_dev *dev, int mode, int vsec)
+{
+	struct cxl_switch_work *work;
+	u8 val;
+	int rc;
+
+	if (!cpu_has_feature(CPU_FTR_HVMODE))
 		return -ENODEV;
+
+	if (!vsec) {
+		vsec = find_cxl_vsec(dev);
+		if (!vsec) {
+			dev_info(&dev->dev, "CXL VSEC not found\n");
+			return -ENODEV;
+		}
 	}
 
-	if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) {
-		dev_err(&dev->dev, "failed to read current mode control: %i", rc);
+	rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val);
+	if (rc) {
+		dev_err(&dev->dev, "Failed to read current mode control: %i", rc);
 		return rc;
 	}
-	val &= ~CXL_VSEC_PROTOCOL_MASK;
-	val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
-	if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) {
-		dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc);
-		return rc;
+
+	if (mode == CXL_BIMODE_PCI) {
+		if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) {
+			dev_info(&dev->dev, "Card is already in PCI mode\n");
+			return 0;
+		}
+		/*
+		 * TODO: Before it's safe to switch the card back to PCI mode
+		 * we need to disable the CAPP and make sure any cachelines the
+		 * card holds have been flushed out. Needs skiboot support.
+		 */
+		dev_WARN(&dev->dev, "CXL mode switch to PCI unsupported!\n");
+		return -EIO;
 	}
+
+	if (val & CXL_VSEC_PROTOCOL_ENABLE) {
+		dev_info(&dev->dev, "Card is already in CXL mode\n");
+		return 0;
+	}
+
+	dev_info(&dev->dev, "Card is in PCI mode, scheduling kernel thread "
+		 "to switch to CXL mode\n");
+
+	work = kmalloc(sizeof(struct cxl_switch_work), GFP_KERNEL);
+	if (!work)
+		return -ENOMEM;
+
+	pci_dev_get(dev);
+	work->dev = dev;
+	work->vsec = vsec;
+	work->mode = mode;
+	INIT_WORK(&work->work, switch_card_to_cxl);
+
+	schedule_work(&work->work);
+
 	/*
-	 * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states
-	 * we must wait 100ms after this mode switch before touching
-	 * PCIe config space.
+	 * We return a failure now to abort the driver init. Once the
+	 * link has been cycled and the card is in cxl mode we will
+	 * come back (possibly using the generic cxl driver), but
+	 * return success as the card should then be in cxl mode.
+	 *
+	 * TODO: What if the card comes back in PCI mode even after
+	 * the switch? Don't want to spin endlessly.
 	 */
-	msleep(100);
+	return -EBUSY;
+}
+EXPORT_SYMBOL_GPL(cxl_check_and_switch_mode);
+
+#endif /* CONFIG_CXL_BIMODAL */
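cxl_check_and_switch_mode() cannot cycle the link from the probe path it is called on, so it queues a kmalloc'd work item, takes a device reference for the worker, and fails the current probe with -EBUSY; the re-probe after the reset finds the card already in cxl mode. A minimal kernel-style sketch of that hand-off pattern; my_switch_work, my_worker and my_probe_step are hypothetical names, not part of this patch:

/*
 * Sketch of the deferred-switch pattern: allocate a work item, pin
 * the device, queue the worker, and abort the current probe.
 */
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_switch_work {
	struct pci_dev *dev;
	struct work_struct work;
};

static void my_worker(struct work_struct *work)
{
	struct my_switch_work *w =
		container_of(work, struct my_switch_work, work);

	/* ... remove devices, flip the mode, reset the link ... */
	pci_dev_put(w->dev);	/* drop the reference taken at queue time */
	kfree(w);		/* the handler owns the work item */
}

static int my_probe_step(struct pci_dev *dev)
{
	struct my_switch_work *w = kmalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return -ENOMEM;

	pci_dev_get(dev);	/* keep dev alive until the worker runs */
	w->dev = dev;
	INIT_WORK(&w->work, my_worker);
	schedule_work(&w->work);

	return -EBUSY;		/* abort this probe; re-probe after reset */
}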
+
+static int setup_cxl_protocol_area(struct pci_dev *dev)
+{
+	u8 val;
+	int rc;
+	int vsec = find_cxl_vsec(dev);
+
+	if (!vsec) {
+		dev_info(&dev->dev, "CXL VSEC not found\n");
+		return -ENODEV;
+	}
+
+	rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val);
+	if (rc) {
+		dev_err(&dev->dev, "Failed to read current mode control: %i\n", rc);
+		return rc;
+	}
+
+	if (!(val & CXL_VSEC_PROTOCOL_ENABLE)) {
+		dev_err(&dev->dev, "Card not in CAPI mode!\n");
+		return -EIO;
+	}
+
+	if ((val & CXL_VSEC_PROTOCOL_MASK) != CXL_VSEC_PROTOCOL_256TB) {
+		val &= ~CXL_VSEC_PROTOCOL_MASK;
+		val |= CXL_VSEC_PROTOCOL_256TB;
+		rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val);
+		if (rc) {
+			dev_err(&dev->dev, "Failed to set CXL protocol area: %i\n", rc);
+			return rc;
+		}
+	}
 
 	return 0;
 }
@@ -712,6 +975,21 @@ static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
 		}
 	}
 
+	if ((afu->modes_supported & ~CXL_MODE_DEDICATED) && afu->max_procs_virtualised == 0) {
+		/*
+		 * We could also check this for the dedicated process model
+		 * since the architecture indicates it should be set to 1, but
+		 * in that case we ignore the value and I'd rather not risk
+		 * breaking any existing dedicated process AFUs that left it as
+		 * 0 (not that I'm aware of any). It is clearly an error for an
+		 * AFU directed AFU to set this to 0, and would have previously
+		 * triggered a bug resulting in the maximum not being enforced
+		 * at all since idr_alloc treats 0 as no maximum.
+		 */
+		dev_err(&afu->dev, "AFU does not support any processes\n");
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
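The comment in this hunk refers to idr_alloc()'s convention that an upper bound of 0 means "no maximum", so a descriptor advertising max_procs_virtualised == 0 previously disabled the PE cap instead of enforcing it. A kernel-style sketch of the failure mode, with a hypothetical alloc_pe() helper:

/*
 * Sketch of the idr_alloc() pitfall: end <= 0 disables the limit
 * entirely, so a bogus maximum of 0 must be rejected up front.
 */
#include <linux/idr.h>
#include <linux/gfp.h>

static int alloc_pe(struct idr *idr, void *ctx, int max_procs)
{
	/*
	 * idr_alloc(idr, ctx, 0, 0, GFP_KERNEL) would accept any id,
	 * silently turning the cap off. Reject 0 before it gets there.
	 */
	if (max_procs == 0)
		return -EINVAL;

	return idr_alloc(idr, ctx, 0, max_procs, GFP_KERNEL);
}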
@@ -753,11 +1031,13 @@ static int sanitise_afu_regs(struct cxl_afu *afu)
 		else
 			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
 	}
-	reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
-	if (reg) {
-		if (reg & ~0xffff)
-			dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
-		cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
+	if (afu->adapter->native->sl_ops->register_serr_irq) {
+		reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
+		if (reg) {
+			if (reg & ~0xffff)
+				dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
+			cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
+		}
 	}
 	reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
 	if (reg) {
@@ -835,11 +1115,13 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc
 	if ((rc = cxl_afu_descriptor_looks_ok(afu)))
 		goto err1;
 
-	if ((rc = init_implementation_afu_regs(afu)))
-		goto err1;
+	if (adapter->native->sl_ops->afu_regs_init)
+		if ((rc = adapter->native->sl_ops->afu_regs_init(afu)))
+			goto err1;
 
-	if ((rc = cxl_native_register_serr_irq(afu)))
-		goto err1;
+	if (adapter->native->sl_ops->register_serr_irq)
+		if ((rc = adapter->native->sl_ops->register_serr_irq(afu)))
+			goto err1;
 
 	if ((rc = cxl_native_register_psl_irq(afu)))
 		goto err2;
@@ -847,7 +1129,8 @@ static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pc
 	return 0;
 
 err2:
-	cxl_native_release_serr_irq(afu);
+	if (adapter->native->sl_ops->release_serr_irq)
+		adapter->native->sl_ops->release_serr_irq(afu);
 err1:
 	pci_unmap_slice_regs(afu);
 	return rc;
@@ -856,7 +1139,8 @@ err1:
 static void pci_deconfigure_afu(struct cxl_afu *afu)
 {
 	cxl_native_release_psl_irq(afu);
-	cxl_native_release_serr_irq(afu);
+	if (afu->adapter->native->sl_ops->release_serr_irq)
+		afu->adapter->native->sl_ops->release_serr_irq(afu);
 	pci_unmap_slice_regs(afu);
 }
 
@@ -1165,7 +1449,7 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
 	if ((rc = setup_cxl_bars(dev)))
 		return rc;
 
-	if ((rc = switch_card_to_cxl(dev)))
+	if ((rc = setup_cxl_protocol_area(dev)))
 		return rc;
 
 	if ((rc = cxl_update_image_control(adapter)))
@@ -1177,10 +1461,13 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
 	if ((rc = sanitise_adapter_regs(adapter)))
 		goto err;
 
-	if ((rc = init_implementation_adapter_regs(adapter, dev)))
+	if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev)))
 		goto err;
 
-	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI)))
+	/* Required for devices using CAPP DMA mode, harmless for others */
+	pci_set_master(dev);
+
+	if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode)))
 		goto err;
 
 	/* If recovery happened, the last step is to turn on snooping.
@@ -1212,6 +1499,43 @@ static void cxl_deconfigure_adapter(struct cxl *adapter)
 	pci_disable_device(pdev);
 }
 
+static const struct cxl_service_layer_ops psl_ops = {
+	.adapter_regs_init = init_implementation_adapter_psl_regs,
+	.afu_regs_init = init_implementation_afu_psl_regs,
+	.register_serr_irq = cxl_native_register_serr_irq,
+	.release_serr_irq = cxl_native_release_serr_irq,
+	.debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_psl_regs,
+	.debugfs_add_afu_sl_regs = cxl_debugfs_add_afu_psl_regs,
+	.psl_irq_dump_registers = cxl_native_psl_irq_dump_regs,
+	.err_irq_dump_registers = cxl_native_err_irq_dump_regs,
+	.debugfs_stop_trace = cxl_stop_trace,
+	.write_timebase_ctrl = write_timebase_ctrl_psl,
+	.timebase_read = timebase_read_psl,
+	.capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
+	.needs_reset_before_disable = true,
+};
+
+static const struct cxl_service_layer_ops xsl_ops = {
+	.adapter_regs_init = init_implementation_adapter_xsl_regs,
+	.debugfs_add_adapter_sl_regs = cxl_debugfs_add_adapter_xsl_regs,
+	.write_timebase_ctrl = write_timebase_ctrl_xsl,
+	.timebase_read = timebase_read_xsl,
+	.capi_mode = OPAL_PHB_CAPI_MODE_DMA,
+	.min_pe = 1, /* Workaround for Mellanox CX4 HW bug */
+};
+
+static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
+{
+	if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) {
+		dev_info(&adapter->dev, "Device uses an XSL\n");
+		adapter->native->sl_ops = &xsl_ops;
+	} else {
+		dev_info(&adapter->dev, "Device uses a PSL\n");
+		adapter->native->sl_ops = &psl_ops;
+	}
+}
+
+
 static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
 {
 	struct cxl *adapter;
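psl_ops fills in every hook while xsl_ops leaves several NULL, so callers (as in the pci_configure_afu() hunk earlier) must treat some hooks as optional. A standalone sketch of the same ops-table pattern with illustrative names:

/*
 * Ops-table pattern: mandatory hooks are called directly, optional
 * ones are NULL-checked first, mirroring the sl_ops guards above.
 */
#include <stdio.h>

struct sl_ops {
	int (*adapter_regs_init)(void);	/* mandatory */
	int (*register_serr_irq)(void);	/* optional */
};

static int psl_adapter_regs_init(void) { puts("PSL adapter regs"); return 0; }
static int psl_register_serr_irq(void) { puts("PSL SERR irq"); return 0; }
static int xsl_adapter_regs_init(void) { puts("XSL adapter regs"); return 0; }

static const struct sl_ops psl = {
	.adapter_regs_init = psl_adapter_regs_init,
	.register_serr_irq = psl_register_serr_irq,
};
static const struct sl_ops xsl = {
	.adapter_regs_init = xsl_adapter_regs_init,
	/* no SERR irq hook: left NULL and skipped by callers */
};

static int configure(const struct sl_ops *ops)
{
	int rc = ops->adapter_regs_init();

	if (rc)
		return rc;
	if (ops->register_serr_irq)	/* optional hook: check first */
		rc = ops->register_serr_irq();
	return rc;
}

int main(void)
{
	configure(&psl);
	configure(&xsl);
	return 0;
}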
@@ -1227,6 +1551,8 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
 		goto err_release;
 	}
 
+	set_sl_ops(adapter, dev);
+
 	/* Set defaults for parameters which need to persist over
 	 * configure/reconfigure
 	 */
@@ -1280,6 +1606,67 @@ static void cxl_pci_remove_adapter(struct cxl *adapter)
 	device_unregister(&adapter->dev);
 }
 
+#define CXL_MAX_PCIEX_PARENT 2
+
+static int cxl_slot_is_switched(struct pci_dev *dev)
+{
+	struct device_node *np;
+	int depth = 0;
+	const __be32 *prop;
+
+	if (!(np = pci_device_to_OF_node(dev))) {
+		pr_err("cxl: np = NULL\n");
+		return -ENODEV;
+	}
+	of_node_get(np);
+	while (np) {
+		np = of_get_next_parent(np);
+		prop = of_get_property(np, "device_type", NULL);
+		if (!prop || strcmp((char *)prop, "pciex"))
+			break;
+		depth++;
+	}
+	of_node_put(np);
+	return (depth > CXL_MAX_PCIEX_PARENT);
+}
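cxl_slot_is_switched() walks up the device-tree parents counting consecutive pciex nodes; more than CXL_MAX_PCIEX_PARENT of them means a PCIe switch sits between the slot and the PHB. A userspace analogue with a stand-in node type:

/*
 * Userspace analogue of the parent walk: count consecutive "pciex"
 * ancestors and flag the slot if the chain is too deep.
 */
#include <stdio.h>
#include <string.h>

#define CXL_MAX_PCIEX_PARENT 2

struct node {
	const char *device_type;
	struct node *parent;
};

static int slot_is_switched(struct node *np)
{
	int depth = 0;

	for (np = np->parent; np; np = np->parent) {
		if (!np->device_type || strcmp(np->device_type, "pciex"))
			break;
		depth++;
	}
	return depth > CXL_MAX_PCIEX_PARENT;
}

int main(void)
{
	struct node phb = { "pciex", NULL };
	struct node sw_up = { "pciex", &phb };
	struct node sw_down = { "pciex", &sw_up };
	struct node direct = { "pci", &phb };
	struct node behind_switch = { "pci", &sw_down };

	printf("direct slot switched: %d\n", slot_is_switched(&direct));
	printf("behind switch: %d\n", slot_is_switched(&behind_switch));
	return 0;	/* prints 0, then 1 */
}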
+
+bool cxl_slot_is_supported(struct pci_dev *dev, int flags)
+{
+	if (!cpu_has_feature(CPU_FTR_HVMODE))
+		return false;
+
+	if ((flags & CXL_SLOT_FLAG_DMA) && (!pvr_version_is(PVR_POWER8NVL))) {
+		/*
+		 * CAPP DMA mode is technically supported on regular P8, but
+		 * will EEH if the card attempts to access memory < 4GB, which
+		 * we cannot realistically avoid. We might be able to work
+		 * around the issue, but until then return unsupported:
+		 */
+		return false;
+	}
+
+	if (cxl_slot_is_switched(dev))
+		return false;
+
+	/*
+	 * XXX: This gets a little tricky on regular P8 (not POWER8NVL) since
+	 * the CAPP can be connected to PHB 0, 1 or 2 on a first come first
+	 * served basis, which is racy to check from here. If we need to
+	 * support this in future we might need to consider having this
+	 * function effectively reserve it ahead of time.
+	 *
+	 * Currently, the only user of this API is the Mellanox CX4, which is
+	 * only supported on P8NVL due to the above mentioned limitation of
+	 * CAPP DMA mode and therefore does not need to worry about this. If the
+	 * issue with CAPP DMA mode is later worked around on P8 we might need
+	 * to revisit this.
+	 */
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(cxl_slot_is_supported);
+
+
 static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	struct cxl *adapter;
@@ -1291,6 +1678,11 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
 		return -ENODEV;
 	}
 
+	if (cxl_slot_is_switched(dev)) {
+		dev_info(&dev->dev, "Ignoring card on incompatible PCI slot\n");
+		return -ENODEV;
+	}
+
 	if (cxl_verbose)
 		dump_cxl_config_space(dev);
 
@@ -1311,6 +1703,9 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
 		dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
 	}
 
+	if (pnv_pci_on_cxl_phb(dev) && adapter->slices >= 1)
+		pnv_cxl_phb_set_peer_afu(dev, adapter->afu[0]);
+
 	return 0;
 }
 
@@ -1381,6 +1776,9 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
 	 */
 	for (i = 0; i < adapter->slices; i++) {
 		afu = adapter->afu[i];
+		/* Only participate in EEH if we are on a virtual PHB */
+		if (afu->phb == NULL)
+			return PCI_ERS_RESULT_NONE;
 		cxl_vphb_error_detected(afu, state);
 	}
 	return PCI_ERS_RESULT_DISCONNECT;