Diffstat (limited to 'drivers/pci')
30 files changed, 1052 insertions, 351 deletions
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 6671946dbf66..6012f3059acd 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
| @@ -175,7 +175,7 @@ config PCIE_IPROC_MSI | |||
| 175 | 175 | ||
| 176 | config PCIE_ALTERA | 176 | config PCIE_ALTERA |
| 177 | bool "Altera PCIe controller" | 177 | bool "Altera PCIe controller" |
| 178 | depends on ARM || NIOS2 || COMPILE_TEST | 178 | depends on ARM || NIOS2 || ARM64 || COMPILE_TEST |
| 179 | help | 179 | help |
| 180 | Say Y here if you want to enable PCIe controller support on Altera | 180 | Say Y here if you want to enable PCIe controller support on Altera |
| 181 | FPGA. | 181 | FPGA. |
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 548c58223868..6ea74b1c0d94 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
| @@ -89,8 +89,8 @@ config PCI_EXYNOS | |||
| 89 | select PCIE_DW_HOST | 89 | select PCIE_DW_HOST |
| 90 | 90 | ||
| 91 | config PCI_IMX6 | 91 | config PCI_IMX6 |
| 92 | bool "Freescale i.MX6/7 PCIe controller" | 92 | bool "Freescale i.MX6/7/8 PCIe controller" |
| 93 | depends on SOC_IMX6Q || SOC_IMX7D || (ARM && COMPILE_TEST) | 93 | depends on SOC_IMX6Q || SOC_IMX7D || (ARM64 && ARCH_MXC) || COMPILE_TEST |
| 94 | depends on PCI_MSI_IRQ_DOMAIN | 94 | depends on PCI_MSI_IRQ_DOMAIN |
| 95 | select PCIE_DW_HOST | 95 | select PCIE_DW_HOST |
| 96 | 96 | ||
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 15620cfa617b..ae84a69ae63a 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
| @@ -81,6 +81,10 @@ | |||
| 81 | #define MSI_REQ_GRANT BIT(0) | 81 | #define MSI_REQ_GRANT BIT(0) |
| 82 | #define MSI_VECTOR_SHIFT 7 | 82 | #define MSI_VECTOR_SHIFT 7 |
| 83 | 83 | ||
| 84 | #define PCIE_1LANE_2LANE_SELECTION BIT(13) | ||
| 85 | #define PCIE_B1C0_MODE_SEL BIT(2) | ||
| 86 | #define PCIE_B0_B1_TSYNCEN BIT(0) | ||
| 87 | |||
| 84 | struct dra7xx_pcie { | 88 | struct dra7xx_pcie { |
| 85 | struct dw_pcie *pci; | 89 | struct dw_pcie *pci; |
| 86 | void __iomem *base; /* DT ti_conf */ | 90 | void __iomem *base; /* DT ti_conf */ |
| @@ -93,6 +97,7 @@ struct dra7xx_pcie { | |||
| 93 | 97 | ||
| 94 | struct dra7xx_pcie_of_data { | 98 | struct dra7xx_pcie_of_data { |
| 95 | enum dw_pcie_device_mode mode; | 99 | enum dw_pcie_device_mode mode; |
| 100 | u32 b1co_mode_sel_mask; | ||
| 96 | }; | 101 | }; |
| 97 | 102 | ||
| 98 | #define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev) | 103 | #define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev) |
| @@ -512,6 +517,10 @@ static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx) | |||
| 512 | int i; | 517 | int i; |
| 513 | 518 | ||
| 514 | for (i = 0; i < phy_count; i++) { | 519 | for (i = 0; i < phy_count; i++) { |
| 520 | ret = phy_set_mode(dra7xx->phy[i], PHY_MODE_PCIE); | ||
| 521 | if (ret < 0) | ||
| 522 | goto err_phy; | ||
| 523 | |||
| 515 | ret = phy_init(dra7xx->phy[i]); | 524 | ret = phy_init(dra7xx->phy[i]); |
| 516 | if (ret < 0) | 525 | if (ret < 0) |
| 517 | goto err_phy; | 526 | goto err_phy; |
| @@ -542,6 +551,26 @@ static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = { | |||
| 542 | .mode = DW_PCIE_EP_TYPE, | 551 | .mode = DW_PCIE_EP_TYPE, |
| 543 | }; | 552 | }; |
| 544 | 553 | ||
| 554 | static const struct dra7xx_pcie_of_data dra746_pcie_rc_of_data = { | ||
| 555 | .b1co_mode_sel_mask = BIT(2), | ||
| 556 | .mode = DW_PCIE_RC_TYPE, | ||
| 557 | }; | ||
| 558 | |||
| 559 | static const struct dra7xx_pcie_of_data dra726_pcie_rc_of_data = { | ||
| 560 | .b1co_mode_sel_mask = GENMASK(3, 2), | ||
| 561 | .mode = DW_PCIE_RC_TYPE, | ||
| 562 | }; | ||
| 563 | |||
| 564 | static const struct dra7xx_pcie_of_data dra746_pcie_ep_of_data = { | ||
| 565 | .b1co_mode_sel_mask = BIT(2), | ||
| 566 | .mode = DW_PCIE_EP_TYPE, | ||
| 567 | }; | ||
| 568 | |||
| 569 | static const struct dra7xx_pcie_of_data dra726_pcie_ep_of_data = { | ||
| 570 | .b1co_mode_sel_mask = GENMASK(3, 2), | ||
| 571 | .mode = DW_PCIE_EP_TYPE, | ||
| 572 | }; | ||
| 573 | |||
| 545 | static const struct of_device_id of_dra7xx_pcie_match[] = { | 574 | static const struct of_device_id of_dra7xx_pcie_match[] = { |
| 546 | { | 575 | { |
| 547 | .compatible = "ti,dra7-pcie", | 576 | .compatible = "ti,dra7-pcie", |
| @@ -551,6 +580,22 @@ static const struct of_device_id of_dra7xx_pcie_match[] = { | |||
| 551 | .compatible = "ti,dra7-pcie-ep", | 580 | .compatible = "ti,dra7-pcie-ep", |
| 552 | .data = &dra7xx_pcie_ep_of_data, | 581 | .data = &dra7xx_pcie_ep_of_data, |
| 553 | }, | 582 | }, |
| 583 | { | ||
| 584 | .compatible = "ti,dra746-pcie-rc", | ||
| 585 | .data = &dra746_pcie_rc_of_data, | ||
| 586 | }, | ||
| 587 | { | ||
| 588 | .compatible = "ti,dra726-pcie-rc", | ||
| 589 | .data = &dra726_pcie_rc_of_data, | ||
| 590 | }, | ||
| 591 | { | ||
| 592 | .compatible = "ti,dra746-pcie-ep", | ||
| 593 | .data = &dra746_pcie_ep_of_data, | ||
| 594 | }, | ||
| 595 | { | ||
| 596 | .compatible = "ti,dra726-pcie-ep", | ||
| 597 | .data = &dra726_pcie_ep_of_data, | ||
| 598 | }, | ||
| 554 | {}, | 599 | {}, |
| 555 | }; | 600 | }; |
| 556 | 601 | ||
| @@ -596,6 +641,34 @@ static int dra7xx_pcie_unaligned_memaccess(struct device *dev) | |||
| 596 | return ret; | 641 | return ret; |
| 597 | } | 642 | } |
| 598 | 643 | ||
| 644 | static int dra7xx_pcie_configure_two_lane(struct device *dev, | ||
| 645 | u32 b1co_mode_sel_mask) | ||
| 646 | { | ||
| 647 | struct device_node *np = dev->of_node; | ||
| 648 | struct regmap *pcie_syscon; | ||
| 649 | unsigned int pcie_reg; | ||
| 650 | u32 mask; | ||
| 651 | u32 val; | ||
| 652 | |||
| 653 | pcie_syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel"); | ||
| 654 | if (IS_ERR(pcie_syscon)) { | ||
| 655 | dev_err(dev, "unable to get ti,syscon-lane-sel\n"); | ||
| 656 | return -EINVAL; | ||
| 657 | } | ||
| 658 | |||
| 659 | if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1, | ||
| 660 | &pcie_reg)) { | ||
| 661 | dev_err(dev, "couldn't get lane selection reg offset\n"); | ||
| 662 | return -EINVAL; | ||
| 663 | } | ||
| 664 | |||
| 665 | mask = b1co_mode_sel_mask | PCIE_B0_B1_TSYNCEN; | ||
| 666 | val = PCIE_B1C0_MODE_SEL | PCIE_B0_B1_TSYNCEN; | ||
| 667 | regmap_update_bits(pcie_syscon, pcie_reg, mask, val); | ||
| 668 | |||
| 669 | return 0; | ||
| 670 | } | ||
| 671 | |||
| 599 | static int __init dra7xx_pcie_probe(struct platform_device *pdev) | 672 | static int __init dra7xx_pcie_probe(struct platform_device *pdev) |
| 600 | { | 673 | { |
| 601 | u32 reg; | 674 | u32 reg; |
| @@ -616,6 +689,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) | |||
| 616 | const struct of_device_id *match; | 689 | const struct of_device_id *match; |
| 617 | const struct dra7xx_pcie_of_data *data; | 690 | const struct dra7xx_pcie_of_data *data; |
| 618 | enum dw_pcie_device_mode mode; | 691 | enum dw_pcie_device_mode mode; |
| 692 | u32 b1co_mode_sel_mask; | ||
| 619 | 693 | ||
| 620 | match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev); | 694 | match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev); |
| 621 | if (!match) | 695 | if (!match) |
| @@ -623,6 +697,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) | |||
| 623 | 697 | ||
| 624 | data = (struct dra7xx_pcie_of_data *)match->data; | 698 | data = (struct dra7xx_pcie_of_data *)match->data; |
| 625 | mode = (enum dw_pcie_device_mode)data->mode; | 699 | mode = (enum dw_pcie_device_mode)data->mode; |
| 700 | b1co_mode_sel_mask = data->b1co_mode_sel_mask; | ||
| 626 | 701 | ||
| 627 | dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL); | 702 | dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL); |
| 628 | if (!dra7xx) | 703 | if (!dra7xx) |
| @@ -678,6 +753,12 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) | |||
| 678 | dra7xx->pci = pci; | 753 | dra7xx->pci = pci; |
| 679 | dra7xx->phy_count = phy_count; | 754 | dra7xx->phy_count = phy_count; |
| 680 | 755 | ||
| 756 | if (phy_count == 2) { | ||
| 757 | ret = dra7xx_pcie_configure_two_lane(dev, b1co_mode_sel_mask); | ||
| 758 | if (ret < 0) | ||
| 759 | dra7xx->phy_count = 1; /* Fallback to x1 lane mode */ | ||
| 760 | } | ||
| 761 | |||
| 681 | ret = dra7xx_pcie_enable_phy(dra7xx); | 762 | ret = dra7xx_pcie_enable_phy(dra7xx); |
| 682 | if (ret) { | 763 | if (ret) { |
| 683 | dev_err(dev, "failed to enable phy\n"); | 764 | dev_err(dev, "failed to enable phy\n"); |
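For readers unfamiliar with the syscon pattern used by the new dra7xx_pcie_configure_two_lane() helper above, the sketch below condenses it into one self-contained function. It assumes, as the hunk itself shows, that "ti,syscon-lane-sel" is a <phandle, register-offset> pair pointing into the DRA7 control module; the function name lane_sel_update() is illustrative and not a symbol in the driver.

    /*
     * Condensed illustration of the "ti,syscon-lane-sel" lookup done by
     * dra7xx_pcie_configure_two_lane(); names here are illustrative only.
     */
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/mfd/syscon.h>
    #include <linux/of.h>
    #include <linux/regmap.h>

    static int lane_sel_update(struct device *dev, u32 mask, u32 val)
    {
            struct device_node *np = dev->of_node;
            struct regmap *syscon;
            u32 offset;

            /* first cell of the property is the syscon phandle */
            syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-lane-sel");
            if (IS_ERR(syscon))
                    return PTR_ERR(syscon);

            /* second cell of the property is the register offset */
            if (of_property_read_u32_index(np, "ti,syscon-lane-sel", 1, &offset))
                    return -EINVAL;

            /* read-modify-write only the mode-select and sync-enable bits */
            return regmap_update_bits(syscon, offset, mask, val);
    }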
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 52e47dac028f..5ae75f25c6fc 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
| @@ -8,6 +8,7 @@ | |||
| 8 | * Author: Sean Cross <xobs@kosagi.com> | 8 | * Author: Sean Cross <xobs@kosagi.com> |
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #include <linux/bitfield.h> | ||
| 11 | #include <linux/clk.h> | 12 | #include <linux/clk.h> |
| 12 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
| 13 | #include <linux/gpio.h> | 14 | #include <linux/gpio.h> |
| @@ -18,6 +19,7 @@ | |||
| 18 | #include <linux/module.h> | 19 | #include <linux/module.h> |
| 19 | #include <linux/of_gpio.h> | 20 | #include <linux/of_gpio.h> |
| 20 | #include <linux/of_device.h> | 21 | #include <linux/of_device.h> |
| 22 | #include <linux/of_address.h> | ||
| 21 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
| 22 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
| 23 | #include <linux/regmap.h> | 25 | #include <linux/regmap.h> |
| @@ -32,6 +34,12 @@ | |||
| 32 | 34 | ||
| 33 | #include "pcie-designware.h" | 35 | #include "pcie-designware.h" |
| 34 | 36 | ||
| 37 | #define IMX8MQ_GPR_PCIE_REF_USE_PAD BIT(9) | ||
| 38 | #define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN BIT(10) | ||
| 39 | #define IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE BIT(11) | ||
| 40 | #define IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE GENMASK(11, 8) | ||
| 41 | #define IMX8MQ_PCIE2_BASE_ADDR 0x33c00000 | ||
| 42 | |||
| 35 | #define to_imx6_pcie(x) dev_get_drvdata((x)->dev) | 43 | #define to_imx6_pcie(x) dev_get_drvdata((x)->dev) |
| 36 | 44 | ||
| 37 | enum imx6_pcie_variants { | 45 | enum imx6_pcie_variants { |
| @@ -39,6 +47,15 @@ enum imx6_pcie_variants { | |||
| 39 | IMX6SX, | 47 | IMX6SX, |
| 40 | IMX6QP, | 48 | IMX6QP, |
| 41 | IMX7D, | 49 | IMX7D, |
| 50 | IMX8MQ, | ||
| 51 | }; | ||
| 52 | |||
| 53 | #define IMX6_PCIE_FLAG_IMX6_PHY BIT(0) | ||
| 54 | #define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1) | ||
| 55 | |||
| 56 | struct imx6_pcie_drvdata { | ||
| 57 | enum imx6_pcie_variants variant; | ||
| 58 | u32 flags; | ||
| 42 | }; | 59 | }; |
| 43 | 60 | ||
| 44 | struct imx6_pcie { | 61 | struct imx6_pcie { |
| @@ -49,11 +66,12 @@ struct imx6_pcie { | |||
| 49 | struct clk *pcie_phy; | 66 | struct clk *pcie_phy; |
| 50 | struct clk *pcie_inbound_axi; | 67 | struct clk *pcie_inbound_axi; |
| 51 | struct clk *pcie; | 68 | struct clk *pcie; |
| 69 | struct clk *pcie_aux; | ||
| 52 | struct regmap *iomuxc_gpr; | 70 | struct regmap *iomuxc_gpr; |
| 71 | u32 controller_id; | ||
| 53 | struct reset_control *pciephy_reset; | 72 | struct reset_control *pciephy_reset; |
| 54 | struct reset_control *apps_reset; | 73 | struct reset_control *apps_reset; |
| 55 | struct reset_control *turnoff_reset; | 74 | struct reset_control *turnoff_reset; |
| 56 | enum imx6_pcie_variants variant; | ||
| 57 | u32 tx_deemph_gen1; | 75 | u32 tx_deemph_gen1; |
| 58 | u32 tx_deemph_gen2_3p5db; | 76 | u32 tx_deemph_gen2_3p5db; |
| 59 | u32 tx_deemph_gen2_6db; | 77 | u32 tx_deemph_gen2_6db; |
| @@ -61,11 +79,13 @@ struct imx6_pcie { | |||
| 61 | u32 tx_swing_low; | 79 | u32 tx_swing_low; |
| 62 | int link_gen; | 80 | int link_gen; |
| 63 | struct regulator *vpcie; | 81 | struct regulator *vpcie; |
| 82 | void __iomem *phy_base; | ||
| 64 | 83 | ||
| 65 | /* power domain for pcie */ | 84 | /* power domain for pcie */ |
| 66 | struct device *pd_pcie; | 85 | struct device *pd_pcie; |
| 67 | /* power domain for pcie phy */ | 86 | /* power domain for pcie phy */ |
| 68 | struct device *pd_pcie_phy; | 87 | struct device *pd_pcie_phy; |
| 88 | const struct imx6_pcie_drvdata *drvdata; | ||
| 69 | }; | 89 | }; |
| 70 | 90 | ||
| 71 | /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ | 91 | /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ |
| @@ -101,7 +121,6 @@ struct imx6_pcie { | |||
| 101 | #define PCIE_PHY_STAT_ACK_LOC 16 | 121 | #define PCIE_PHY_STAT_ACK_LOC 16 |
| 102 | 122 | ||
| 103 | #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C | 123 | #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C |
| 104 | #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) | ||
| 105 | 124 | ||
| 106 | /* PHY registers (not memory-mapped) */ | 125 | /* PHY registers (not memory-mapped) */ |
| 107 | #define PCIE_PHY_ATEOVRD 0x10 | 126 | #define PCIE_PHY_ATEOVRD 0x10 |
| @@ -117,6 +136,23 @@ struct imx6_pcie { | |||
| 117 | #define PCIE_PHY_RX_ASIC_OUT 0x100D | 136 | #define PCIE_PHY_RX_ASIC_OUT 0x100D |
| 118 | #define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0) | 137 | #define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0) |
| 119 | 138 | ||
| 139 | /* iMX7 PCIe PHY registers */ | ||
| 140 | #define PCIE_PHY_CMN_REG4 0x14 | ||
| 141 | /* These are probably the bits that *aren't* DCC_FB_EN */ | ||
| 142 | #define PCIE_PHY_CMN_REG4_DCC_FB_EN 0x29 | ||
| 143 | |||
| 144 | #define PCIE_PHY_CMN_REG15 0x54 | ||
| 145 | #define PCIE_PHY_CMN_REG15_DLY_4 BIT(2) | ||
| 146 | #define PCIE_PHY_CMN_REG15_PLL_PD BIT(5) | ||
| 147 | #define PCIE_PHY_CMN_REG15_OVRD_PLL_PD BIT(7) | ||
| 148 | |||
| 149 | #define PCIE_PHY_CMN_REG24 0x90 | ||
| 150 | #define PCIE_PHY_CMN_REG24_RX_EQ BIT(6) | ||
| 151 | #define PCIE_PHY_CMN_REG24_RX_EQ_SEL BIT(3) | ||
| 152 | |||
| 153 | #define PCIE_PHY_CMN_REG26 0x98 | ||
| 154 | #define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC | ||
| 155 | |||
| 120 | #define PHY_RX_OVRD_IN_LO 0x1005 | 156 | #define PHY_RX_OVRD_IN_LO 0x1005 |
| 121 | #define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5) | 157 | #define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5) |
| 122 | #define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3) | 158 | #define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3) |
| @@ -251,6 +287,9 @@ static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie) | |||
| 251 | { | 287 | { |
| 252 | u32 tmp; | 288 | u32 tmp; |
| 253 | 289 | ||
| 290 | if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY)) | ||
| 291 | return; | ||
| 292 | |||
| 254 | pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp); | 293 | pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp); |
| 255 | tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | | 294 | tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | |
| 256 | PHY_RX_OVRD_IN_LO_RX_PLL_EN); | 295 | PHY_RX_OVRD_IN_LO_RX_PLL_EN); |
| @@ -264,6 +303,7 @@ static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie) | |||
| 264 | pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp); | 303 | pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp); |
| 265 | } | 304 | } |
| 266 | 305 | ||
| 306 | #ifdef CONFIG_ARM | ||
| 267 | /* Added for PCI abort handling */ | 307 | /* Added for PCI abort handling */ |
| 268 | static int imx6q_pcie_abort_handler(unsigned long addr, | 308 | static int imx6q_pcie_abort_handler(unsigned long addr, |
| 269 | unsigned int fsr, struct pt_regs *regs) | 309 | unsigned int fsr, struct pt_regs *regs) |
| @@ -297,6 +337,7 @@ static int imx6q_pcie_abort_handler(unsigned long addr, | |||
| 297 | 337 | ||
| 298 | return 1; | 338 | return 1; |
| 299 | } | 339 | } |
| 340 | #endif | ||
| 300 | 341 | ||
| 301 | static int imx6_pcie_attach_pd(struct device *dev) | 342 | static int imx6_pcie_attach_pd(struct device *dev) |
| 302 | { | 343 | { |
| @@ -339,8 +380,9 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) | |||
| 339 | { | 380 | { |
| 340 | struct device *dev = imx6_pcie->pci->dev; | 381 | struct device *dev = imx6_pcie->pci->dev; |
| 341 | 382 | ||
| 342 | switch (imx6_pcie->variant) { | 383 | switch (imx6_pcie->drvdata->variant) { |
| 343 | case IMX7D: | 384 | case IMX7D: |
| 385 | case IMX8MQ: | ||
| 344 | reset_control_assert(imx6_pcie->pciephy_reset); | 386 | reset_control_assert(imx6_pcie->pciephy_reset); |
| 345 | reset_control_assert(imx6_pcie->apps_reset); | 387 | reset_control_assert(imx6_pcie->apps_reset); |
| 346 | break; | 388 | break; |
| @@ -375,13 +417,20 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) | |||
| 375 | } | 417 | } |
| 376 | } | 418 | } |
| 377 | 419 | ||
| 420 | static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie) | ||
| 421 | { | ||
| 422 | WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ); | ||
| 423 | return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14; | ||
| 424 | } | ||
| 425 | |||
| 378 | static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) | 426 | static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) |
| 379 | { | 427 | { |
| 380 | struct dw_pcie *pci = imx6_pcie->pci; | 428 | struct dw_pcie *pci = imx6_pcie->pci; |
| 381 | struct device *dev = pci->dev; | 429 | struct device *dev = pci->dev; |
| 430 | unsigned int offset; | ||
| 382 | int ret = 0; | 431 | int ret = 0; |
| 383 | 432 | ||
| 384 | switch (imx6_pcie->variant) { | 433 | switch (imx6_pcie->drvdata->variant) { |
| 385 | case IMX6SX: | 434 | case IMX6SX: |
| 386 | ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi); | 435 | ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi); |
| 387 | if (ret) { | 436 | if (ret) { |
| @@ -409,6 +458,25 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) | |||
| 409 | break; | 458 | break; |
| 410 | case IMX7D: | 459 | case IMX7D: |
| 411 | break; | 460 | break; |
| 461 | case IMX8MQ: | ||
| 462 | ret = clk_prepare_enable(imx6_pcie->pcie_aux); | ||
| 463 | if (ret) { | ||
| 464 | dev_err(dev, "unable to enable pcie_aux clock\n"); | ||
| 465 | break; | ||
| 466 | } | ||
| 467 | |||
| 468 | offset = imx6_pcie_grp_offset(imx6_pcie); | ||
| 469 | /* | ||
| 470 | * Set the over ride low and enabled | ||
| 471 | * make sure that REF_CLK is turned on. | ||
| 472 | */ | ||
| 473 | regmap_update_bits(imx6_pcie->iomuxc_gpr, offset, | ||
| 474 | IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE, | ||
| 475 | 0); | ||
| 476 | regmap_update_bits(imx6_pcie->iomuxc_gpr, offset, | ||
| 477 | IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN, | ||
| 478 | IMX8MQ_GPR_PCIE_CLK_REQ_OVERRIDE_EN); | ||
| 479 | break; | ||
| 412 | } | 480 | } |
| 413 | 481 | ||
| 414 | return ret; | 482 | return ret; |
| @@ -484,9 +552,32 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) | |||
| 484 | !imx6_pcie->gpio_active_high); | 552 | !imx6_pcie->gpio_active_high); |
| 485 | } | 553 | } |
| 486 | 554 | ||
| 487 | switch (imx6_pcie->variant) { | 555 | switch (imx6_pcie->drvdata->variant) { |
| 556 | case IMX8MQ: | ||
| 557 | reset_control_deassert(imx6_pcie->pciephy_reset); | ||
| 558 | break; | ||
| 488 | case IMX7D: | 559 | case IMX7D: |
| 489 | reset_control_deassert(imx6_pcie->pciephy_reset); | 560 | reset_control_deassert(imx6_pcie->pciephy_reset); |
| 561 | |||
| 562 | /* Workaround for ERR010728, failure of PCI-e PLL VCO to | ||
| 563 | * oscillate, especially when cold. This turns off "Duty-cycle | ||
| 564 | * Corrector" and other mysterious undocumented things. | ||
| 565 | */ | ||
| 566 | if (likely(imx6_pcie->phy_base)) { | ||
| 567 | /* De-assert DCC_FB_EN */ | ||
| 568 | writel(PCIE_PHY_CMN_REG4_DCC_FB_EN, | ||
| 569 | imx6_pcie->phy_base + PCIE_PHY_CMN_REG4); | ||
| 570 | /* Assert RX_EQS and RX_EQS_SEL */ | ||
| 571 | writel(PCIE_PHY_CMN_REG24_RX_EQ_SEL | ||
| 572 | | PCIE_PHY_CMN_REG24_RX_EQ, | ||
| 573 | imx6_pcie->phy_base + PCIE_PHY_CMN_REG24); | ||
| 574 | /* Assert ATT_MODE */ | ||
| 575 | writel(PCIE_PHY_CMN_REG26_ATT_MODE, | ||
| 576 | imx6_pcie->phy_base + PCIE_PHY_CMN_REG26); | ||
| 577 | } else { | ||
| 578 | dev_warn(dev, "Unable to apply ERR010728 workaround. DT missing fsl,imx7d-pcie-phy phandle ?\n"); | ||
| 579 | } | ||
| 580 | |||
| 490 | imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie); | 581 | imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie); |
| 491 | break; | 582 | break; |
| 492 | case IMX6SX: | 583 | case IMX6SX: |
| @@ -520,9 +611,37 @@ err_pcie_phy: | |||
| 520 | } | 611 | } |
| 521 | } | 612 | } |
| 522 | 613 | ||
| 614 | static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie) | ||
| 615 | { | ||
| 616 | unsigned int mask, val; | ||
| 617 | |||
| 618 | if (imx6_pcie->drvdata->variant == IMX8MQ && | ||
| 619 | imx6_pcie->controller_id == 1) { | ||
| 620 | mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE; | ||
| 621 | val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE, | ||
| 622 | PCI_EXP_TYPE_ROOT_PORT); | ||
| 623 | } else { | ||
| 624 | mask = IMX6Q_GPR12_DEVICE_TYPE; | ||
| 625 | val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, | ||
| 626 | PCI_EXP_TYPE_ROOT_PORT); | ||
| 627 | } | ||
| 628 | |||
| 629 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val); | ||
| 630 | } | ||
| 631 | |||
| 523 | static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) | 632 | static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) |
| 524 | { | 633 | { |
| 525 | switch (imx6_pcie->variant) { | 634 | switch (imx6_pcie->drvdata->variant) { |
| 635 | case IMX8MQ: | ||
| 636 | /* | ||
| 637 | * TODO: Currently this code assumes external | ||
| 638 | * oscillator is being used | ||
| 639 | */ | ||
| 640 | regmap_update_bits(imx6_pcie->iomuxc_gpr, | ||
| 641 | imx6_pcie_grp_offset(imx6_pcie), | ||
| 642 | IMX8MQ_GPR_PCIE_REF_USE_PAD, | ||
| 643 | IMX8MQ_GPR_PCIE_REF_USE_PAD); | ||
| 644 | break; | ||
| 526 | case IMX7D: | 645 | case IMX7D: |
| 527 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | 646 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, |
| 528 | IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0); | 647 | IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0); |
| @@ -558,8 +677,7 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) | |||
| 558 | break; | 677 | break; |
| 559 | } | 678 | } |
| 560 | 679 | ||
| 561 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | 680 | imx6_pcie_configure_type(imx6_pcie); |
| 562 | IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12); | ||
| 563 | } | 681 | } |
| 564 | 682 | ||
| 565 | static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie) | 683 | static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie) |
| @@ -568,6 +686,9 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie) | |||
| 568 | int mult, div; | 686 | int mult, div; |
| 569 | u32 val; | 687 | u32 val; |
| 570 | 688 | ||
| 689 | if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY)) | ||
| 690 | return 0; | ||
| 691 | |||
| 571 | switch (phy_rate) { | 692 | switch (phy_rate) { |
| 572 | case 125000000: | 693 | case 125000000: |
| 573 | /* | 694 | /* |
| @@ -644,7 +765,7 @@ static void imx6_pcie_ltssm_enable(struct device *dev) | |||
| 644 | { | 765 | { |
| 645 | struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); | 766 | struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); |
| 646 | 767 | ||
| 647 | switch (imx6_pcie->variant) { | 768 | switch (imx6_pcie->drvdata->variant) { |
| 648 | case IMX6Q: | 769 | case IMX6Q: |
| 649 | case IMX6SX: | 770 | case IMX6SX: |
| 650 | case IMX6QP: | 771 | case IMX6QP: |
| @@ -653,6 +774,7 @@ static void imx6_pcie_ltssm_enable(struct device *dev) | |||
| 653 | IMX6Q_GPR12_PCIE_CTL_2); | 774 | IMX6Q_GPR12_PCIE_CTL_2); |
| 654 | break; | 775 | break; |
| 655 | case IMX7D: | 776 | case IMX7D: |
| 777 | case IMX8MQ: | ||
| 656 | reset_control_deassert(imx6_pcie->apps_reset); | 778 | reset_control_deassert(imx6_pcie->apps_reset); |
| 657 | break; | 779 | break; |
| 658 | } | 780 | } |
| @@ -697,7 +819,8 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie) | |||
| 697 | tmp |= PORT_LOGIC_SPEED_CHANGE; | 819 | tmp |= PORT_LOGIC_SPEED_CHANGE; |
| 698 | dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); | 820 | dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); |
| 699 | 821 | ||
| 700 | if (imx6_pcie->variant != IMX7D) { | 822 | if (imx6_pcie->drvdata->flags & |
| 823 | IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE) { | ||
| 701 | /* | 824 | /* |
| 702 | * On i.MX7, DIRECT_SPEED_CHANGE behaves differently | 825 | * On i.MX7, DIRECT_SPEED_CHANGE behaves differently |
| 703 | * from i.MX6 family when no link speed transition | 826 | * from i.MX6 family when no link speed transition |
| @@ -794,7 +917,7 @@ static void imx6_pcie_ltssm_disable(struct device *dev) | |||
| 794 | { | 917 | { |
| 795 | struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); | 918 | struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); |
| 796 | 919 | ||
| 797 | switch (imx6_pcie->variant) { | 920 | switch (imx6_pcie->drvdata->variant) { |
| 798 | case IMX6SX: | 921 | case IMX6SX: |
| 799 | case IMX6QP: | 922 | case IMX6QP: |
| 800 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | 923 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, |
| @@ -820,7 +943,7 @@ static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie) | |||
| 820 | } | 943 | } |
| 821 | 944 | ||
| 822 | /* Others poke directly at IOMUXC registers */ | 945 | /* Others poke directly at IOMUXC registers */ |
| 823 | switch (imx6_pcie->variant) { | 946 | switch (imx6_pcie->drvdata->variant) { |
| 824 | case IMX6SX: | 947 | case IMX6SX: |
| 825 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | 948 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, |
| 826 | IMX6SX_GPR12_PCIE_PM_TURN_OFF, | 949 | IMX6SX_GPR12_PCIE_PM_TURN_OFF, |
| @@ -850,7 +973,7 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie) | |||
| 850 | clk_disable_unprepare(imx6_pcie->pcie_phy); | 973 | clk_disable_unprepare(imx6_pcie->pcie_phy); |
| 851 | clk_disable_unprepare(imx6_pcie->pcie_bus); | 974 | clk_disable_unprepare(imx6_pcie->pcie_bus); |
| 852 | 975 | ||
| 853 | switch (imx6_pcie->variant) { | 976 | switch (imx6_pcie->drvdata->variant) { |
| 854 | case IMX6SX: | 977 | case IMX6SX: |
| 855 | clk_disable_unprepare(imx6_pcie->pcie_inbound_axi); | 978 | clk_disable_unprepare(imx6_pcie->pcie_inbound_axi); |
| 856 | break; | 979 | break; |
| @@ -859,6 +982,9 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie) | |||
| 859 | IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, | 982 | IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, |
| 860 | IMX7D_GPR12_PCIE_PHY_REFCLK_SEL); | 983 | IMX7D_GPR12_PCIE_PHY_REFCLK_SEL); |
| 861 | break; | 984 | break; |
| 985 | case IMX8MQ: | ||
| 986 | clk_disable_unprepare(imx6_pcie->pcie_aux); | ||
| 987 | break; | ||
| 862 | default: | 988 | default: |
| 863 | break; | 989 | break; |
| 864 | } | 990 | } |
| @@ -866,8 +992,8 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie) | |||
| 866 | 992 | ||
| 867 | static inline bool imx6_pcie_supports_suspend(struct imx6_pcie *imx6_pcie) | 993 | static inline bool imx6_pcie_supports_suspend(struct imx6_pcie *imx6_pcie) |
| 868 | { | 994 | { |
| 869 | return (imx6_pcie->variant == IMX7D || | 995 | return (imx6_pcie->drvdata->variant == IMX7D || |
| 870 | imx6_pcie->variant == IMX6SX); | 996 | imx6_pcie->drvdata->variant == IMX6SX); |
| 871 | } | 997 | } |
| 872 | 998 | ||
| 873 | static int imx6_pcie_suspend_noirq(struct device *dev) | 999 | static int imx6_pcie_suspend_noirq(struct device *dev) |
| @@ -916,6 +1042,7 @@ static int imx6_pcie_probe(struct platform_device *pdev) | |||
| 916 | struct device *dev = &pdev->dev; | 1042 | struct device *dev = &pdev->dev; |
| 917 | struct dw_pcie *pci; | 1043 | struct dw_pcie *pci; |
| 918 | struct imx6_pcie *imx6_pcie; | 1044 | struct imx6_pcie *imx6_pcie; |
| 1045 | struct device_node *np; | ||
| 919 | struct resource *dbi_base; | 1046 | struct resource *dbi_base; |
| 920 | struct device_node *node = dev->of_node; | 1047 | struct device_node *node = dev->of_node; |
| 921 | int ret; | 1048 | int ret; |
| @@ -933,8 +1060,24 @@ static int imx6_pcie_probe(struct platform_device *pdev) | |||
| 933 | pci->ops = &dw_pcie_ops; | 1060 | pci->ops = &dw_pcie_ops; |
| 934 | 1061 | ||
| 935 | imx6_pcie->pci = pci; | 1062 | imx6_pcie->pci = pci; |
| 936 | imx6_pcie->variant = | 1063 | imx6_pcie->drvdata = of_device_get_match_data(dev); |
| 937 | (enum imx6_pcie_variants)of_device_get_match_data(dev); | 1064 | |
| 1065 | /* Find the PHY if one is defined, only imx7d uses it */ | ||
| 1066 | np = of_parse_phandle(node, "fsl,imx7d-pcie-phy", 0); | ||
| 1067 | if (np) { | ||
| 1068 | struct resource res; | ||
| 1069 | |||
| 1070 | ret = of_address_to_resource(np, 0, &res); | ||
| 1071 | if (ret) { | ||
| 1072 | dev_err(dev, "Unable to map PCIe PHY\n"); | ||
| 1073 | return ret; | ||
| 1074 | } | ||
| 1075 | imx6_pcie->phy_base = devm_ioremap_resource(dev, &res); | ||
| 1076 | if (IS_ERR(imx6_pcie->phy_base)) { | ||
| 1077 | dev_err(dev, "Unable to map PCIe PHY\n"); | ||
| 1078 | return PTR_ERR(imx6_pcie->phy_base); | ||
| 1079 | } | ||
| 1080 | } | ||
| 938 | 1081 | ||
| 939 | dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1082 | dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 940 | pci->dbi_base = devm_ioremap_resource(dev, dbi_base); | 1083 | pci->dbi_base = devm_ioremap_resource(dev, dbi_base); |
| @@ -978,7 +1121,7 @@ static int imx6_pcie_probe(struct platform_device *pdev) | |||
| 978 | return PTR_ERR(imx6_pcie->pcie); | 1121 | return PTR_ERR(imx6_pcie->pcie); |
| 979 | } | 1122 | } |
| 980 | 1123 | ||
| 981 | switch (imx6_pcie->variant) { | 1124 | switch (imx6_pcie->drvdata->variant) { |
| 982 | case IMX6SX: | 1125 | case IMX6SX: |
| 983 | imx6_pcie->pcie_inbound_axi = devm_clk_get(dev, | 1126 | imx6_pcie->pcie_inbound_axi = devm_clk_get(dev, |
| 984 | "pcie_inbound_axi"); | 1127 | "pcie_inbound_axi"); |
| @@ -987,7 +1130,17 @@ static int imx6_pcie_probe(struct platform_device *pdev) | |||
| 987 | return PTR_ERR(imx6_pcie->pcie_inbound_axi); | 1130 | return PTR_ERR(imx6_pcie->pcie_inbound_axi); |
| 988 | } | 1131 | } |
| 989 | break; | 1132 | break; |
| 1133 | case IMX8MQ: | ||
| 1134 | imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux"); | ||
| 1135 | if (IS_ERR(imx6_pcie->pcie_aux)) { | ||
| 1136 | dev_err(dev, "pcie_aux clock source missing or invalid\n"); | ||
| 1137 | return PTR_ERR(imx6_pcie->pcie_aux); | ||
| 1138 | } | ||
| 1139 | /* fall through */ | ||
| 990 | case IMX7D: | 1140 | case IMX7D: |
| 1141 | if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR) | ||
| 1142 | imx6_pcie->controller_id = 1; | ||
| 1143 | |||
| 991 | imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, | 1144 | imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, |
| 992 | "pciephy"); | 1145 | "pciephy"); |
| 993 | if (IS_ERR(imx6_pcie->pciephy_reset)) { | 1146 | if (IS_ERR(imx6_pcie->pciephy_reset)) { |
| @@ -1084,11 +1237,36 @@ static void imx6_pcie_shutdown(struct platform_device *pdev) | |||
| 1084 | imx6_pcie_assert_core_reset(imx6_pcie); | 1237 | imx6_pcie_assert_core_reset(imx6_pcie); |
| 1085 | } | 1238 | } |
| 1086 | 1239 | ||
| 1240 | static const struct imx6_pcie_drvdata drvdata[] = { | ||
| 1241 | [IMX6Q] = { | ||
| 1242 | .variant = IMX6Q, | ||
| 1243 | .flags = IMX6_PCIE_FLAG_IMX6_PHY | | ||
| 1244 | IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE, | ||
| 1245 | }, | ||
| 1246 | [IMX6SX] = { | ||
| 1247 | .variant = IMX6SX, | ||
| 1248 | .flags = IMX6_PCIE_FLAG_IMX6_PHY | | ||
| 1249 | IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE, | ||
| 1250 | }, | ||
| 1251 | [IMX6QP] = { | ||
| 1252 | .variant = IMX6QP, | ||
| 1253 | .flags = IMX6_PCIE_FLAG_IMX6_PHY | | ||
| 1254 | IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE, | ||
| 1255 | }, | ||
| 1256 | [IMX7D] = { | ||
| 1257 | .variant = IMX7D, | ||
| 1258 | }, | ||
| 1259 | [IMX8MQ] = { | ||
| 1260 | .variant = IMX8MQ, | ||
| 1261 | }, | ||
| 1262 | }; | ||
| 1263 | |||
| 1087 | static const struct of_device_id imx6_pcie_of_match[] = { | 1264 | static const struct of_device_id imx6_pcie_of_match[] = { |
| 1088 | { .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, }, | 1265 | { .compatible = "fsl,imx6q-pcie", .data = &drvdata[IMX6Q], }, |
| 1089 | { .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, }, | 1266 | { .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], }, |
| 1090 | { .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, }, | 1267 | { .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], }, |
| 1091 | { .compatible = "fsl,imx7d-pcie", .data = (void *)IMX7D, }, | 1268 | { .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], }, |
| 1269 | { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], } , | ||
| 1092 | {}, | 1270 | {}, |
| 1093 | }; | 1271 | }; |
| 1094 | 1272 | ||
| @@ -1105,6 +1283,7 @@ static struct platform_driver imx6_pcie_driver = { | |||
| 1105 | 1283 | ||
| 1106 | static int __init imx6_pcie_init(void) | 1284 | static int __init imx6_pcie_init(void) |
| 1107 | { | 1285 | { |
| 1286 | #ifdef CONFIG_ARM | ||
| 1108 | /* | 1287 | /* |
| 1109 | * Since probe() can be deferred we need to make sure that | 1288 | * Since probe() can be deferred we need to make sure that |
| 1110 | * hook_fault_code is not called after __init memory is freed | 1289 | * hook_fault_code is not called after __init memory is freed |
| @@ -1114,6 +1293,7 @@ static int __init imx6_pcie_init(void) | |||
| 1114 | */ | 1293 | */ |
| 1115 | hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0, | 1294 | hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0, |
| 1116 | "external abort on non-linefetch"); | 1295 | "external abort on non-linefetch"); |
| 1296 | #endif | ||
| 1117 | 1297 | ||
| 1118 | return platform_driver_register(&imx6_pcie_driver); | 1298 | return platform_driver_register(&imx6_pcie_driver); |
| 1119 | } | 1299 | } |
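The i.MX6 changes above replace the bare enum stored in of_device_id.data with a const driver-data struct, so per-SoC behaviour becomes a flag test instead of a growing list of variant comparisons. A minimal, self-contained sketch of that pattern follows; everything prefixed ex_/EX_ is an illustrative name, not part of the driver.

    #include <linux/bits.h>
    #include <linux/mod_devicetable.h>
    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    #define EX_FLAG_HAS_INTERNAL_PHY	BIT(0)

    struct ex_drvdata {
            u32 flags;
    };

    static const struct ex_drvdata ex_imx6q_data = {
            .flags = EX_FLAG_HAS_INTERNAL_PHY,
    };

    static const struct of_device_id ex_of_match[] = {
            { .compatible = "fsl,imx6q-pcie", .data = &ex_imx6q_data },
            { /* sentinel */ }
    };

    static int ex_probe(struct platform_device *pdev)
    {
            /* returns the .data pointer of the matched compatible */
            const struct ex_drvdata *dd = of_device_get_match_data(&pdev->dev);

            if (dd && (dd->flags & EX_FLAG_HAS_INTERNAL_PHY)) {
                    /* only SoCs that set the flag take this path */
            }

            return 0;
    }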
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 7a2925a16ab8..24f5a775ad34 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
| @@ -477,8 +477,10 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, | |||
| 477 | 477 | ||
| 478 | iounmap(msix_tbl); | 478 | iounmap(msix_tbl); |
| 479 | 479 | ||
| 480 | if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) | 480 | if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT) { |
| 481 | dev_dbg(pci->dev, "MSI-X entry ctrl set\n"); | ||
| 481 | return -EPERM; | 482 | return -EPERM; |
| 483 | } | ||
| 482 | 484 | ||
| 483 | ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, | 485 | ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, |
| 484 | epc->mem->page_size); | 486 | epc->mem->page_size); |
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 721d60a5d9e4..25087d3c9a82 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
| @@ -120,9 +120,9 @@ static void dw_chained_msi_isr(struct irq_desc *desc) | |||
| 120 | chained_irq_exit(chip, desc); | 120 | chained_irq_exit(chip, desc); |
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg) | 123 | static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg) |
| 124 | { | 124 | { |
| 125 | struct pcie_port *pp = irq_data_get_irq_chip_data(data); | 125 | struct pcie_port *pp = irq_data_get_irq_chip_data(d); |
| 126 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | 126 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
| 127 | u64 msi_target; | 127 | u64 msi_target; |
| 128 | 128 | ||
| @@ -135,61 +135,61 @@ static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg) | |||
| 135 | msg->address_hi = upper_32_bits(msi_target); | 135 | msg->address_hi = upper_32_bits(msi_target); |
| 136 | 136 | ||
| 137 | if (pp->ops->get_msi_data) | 137 | if (pp->ops->get_msi_data) |
| 138 | msg->data = pp->ops->get_msi_data(pp, data->hwirq); | 138 | msg->data = pp->ops->get_msi_data(pp, d->hwirq); |
| 139 | else | 139 | else |
| 140 | msg->data = data->hwirq; | 140 | msg->data = d->hwirq; |
| 141 | 141 | ||
| 142 | dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", | 142 | dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", |
| 143 | (int)data->hwirq, msg->address_hi, msg->address_lo); | 143 | (int)d->hwirq, msg->address_hi, msg->address_lo); |
| 144 | } | 144 | } |
| 145 | 145 | ||
| 146 | static int dw_pci_msi_set_affinity(struct irq_data *irq_data, | 146 | static int dw_pci_msi_set_affinity(struct irq_data *d, |
| 147 | const struct cpumask *mask, bool force) | 147 | const struct cpumask *mask, bool force) |
| 148 | { | 148 | { |
| 149 | return -EINVAL; | 149 | return -EINVAL; |
| 150 | } | 150 | } |
| 151 | 151 | ||
| 152 | static void dw_pci_bottom_mask(struct irq_data *data) | 152 | static void dw_pci_bottom_mask(struct irq_data *d) |
| 153 | { | 153 | { |
| 154 | struct pcie_port *pp = irq_data_get_irq_chip_data(data); | 154 | struct pcie_port *pp = irq_data_get_irq_chip_data(d); |
| 155 | unsigned int res, bit, ctrl; | 155 | unsigned int res, bit, ctrl; |
| 156 | unsigned long flags; | 156 | unsigned long flags; |
| 157 | 157 | ||
| 158 | raw_spin_lock_irqsave(&pp->lock, flags); | 158 | raw_spin_lock_irqsave(&pp->lock, flags); |
| 159 | 159 | ||
| 160 | if (pp->ops->msi_clear_irq) { | 160 | if (pp->ops->msi_clear_irq) { |
| 161 | pp->ops->msi_clear_irq(pp, data->hwirq); | 161 | pp->ops->msi_clear_irq(pp, d->hwirq); |
| 162 | } else { | 162 | } else { |
| 163 | ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL; | 163 | ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; |
| 164 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; | 164 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; |
| 165 | bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; | 165 | bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; |
| 166 | 166 | ||
| 167 | pp->irq_status[ctrl] &= ~(1 << bit); | 167 | pp->irq_mask[ctrl] |= BIT(bit); |
| 168 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, | 168 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, |
| 169 | ~pp->irq_status[ctrl]); | 169 | pp->irq_mask[ctrl]); |
| 170 | } | 170 | } |
| 171 | 171 | ||
| 172 | raw_spin_unlock_irqrestore(&pp->lock, flags); | 172 | raw_spin_unlock_irqrestore(&pp->lock, flags); |
| 173 | } | 173 | } |
| 174 | 174 | ||
| 175 | static void dw_pci_bottom_unmask(struct irq_data *data) | 175 | static void dw_pci_bottom_unmask(struct irq_data *d) |
| 176 | { | 176 | { |
| 177 | struct pcie_port *pp = irq_data_get_irq_chip_data(data); | 177 | struct pcie_port *pp = irq_data_get_irq_chip_data(d); |
| 178 | unsigned int res, bit, ctrl; | 178 | unsigned int res, bit, ctrl; |
| 179 | unsigned long flags; | 179 | unsigned long flags; |
| 180 | 180 | ||
| 181 | raw_spin_lock_irqsave(&pp->lock, flags); | 181 | raw_spin_lock_irqsave(&pp->lock, flags); |
| 182 | 182 | ||
| 183 | if (pp->ops->msi_set_irq) { | 183 | if (pp->ops->msi_set_irq) { |
| 184 | pp->ops->msi_set_irq(pp, data->hwirq); | 184 | pp->ops->msi_set_irq(pp, d->hwirq); |
| 185 | } else { | 185 | } else { |
| 186 | ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL; | 186 | ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; |
| 187 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; | 187 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; |
| 188 | bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; | 188 | bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; |
| 189 | 189 | ||
| 190 | pp->irq_status[ctrl] |= 1 << bit; | 190 | pp->irq_mask[ctrl] &= ~BIT(bit); |
| 191 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, | 191 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, |
| 192 | ~pp->irq_status[ctrl]); | 192 | pp->irq_mask[ctrl]); |
| 193 | } | 193 | } |
| 194 | 194 | ||
| 195 | raw_spin_unlock_irqrestore(&pp->lock, flags); | 195 | raw_spin_unlock_irqrestore(&pp->lock, flags); |
| @@ -207,7 +207,7 @@ static void dw_pci_bottom_ack(struct irq_data *d) | |||
| 207 | 207 | ||
| 208 | raw_spin_lock_irqsave(&pp->lock, flags); | 208 | raw_spin_lock_irqsave(&pp->lock, flags); |
| 209 | 209 | ||
| 210 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, 1 << bit); | 210 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit)); |
| 211 | 211 | ||
| 212 | if (pp->ops->msi_irq_ack) | 212 | if (pp->ops->msi_irq_ack) |
| 213 | pp->ops->msi_irq_ack(d->hwirq, pp); | 213 | pp->ops->msi_irq_ack(d->hwirq, pp); |
| @@ -255,13 +255,13 @@ static int dw_pcie_irq_domain_alloc(struct irq_domain *domain, | |||
| 255 | static void dw_pcie_irq_domain_free(struct irq_domain *domain, | 255 | static void dw_pcie_irq_domain_free(struct irq_domain *domain, |
| 256 | unsigned int virq, unsigned int nr_irqs) | 256 | unsigned int virq, unsigned int nr_irqs) |
| 257 | { | 257 | { |
| 258 | struct irq_data *data = irq_domain_get_irq_data(domain, virq); | 258 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); |
| 259 | struct pcie_port *pp = irq_data_get_irq_chip_data(data); | 259 | struct pcie_port *pp = irq_data_get_irq_chip_data(d); |
| 260 | unsigned long flags; | 260 | unsigned long flags; |
| 261 | 261 | ||
| 262 | raw_spin_lock_irqsave(&pp->lock, flags); | 262 | raw_spin_lock_irqsave(&pp->lock, flags); |
| 263 | 263 | ||
| 264 | bitmap_release_region(pp->msi_irq_in_use, data->hwirq, | 264 | bitmap_release_region(pp->msi_irq_in_use, d->hwirq, |
| 265 | order_base_2(nr_irqs)); | 265 | order_base_2(nr_irqs)); |
| 266 | 266 | ||
| 267 | raw_spin_unlock_irqrestore(&pp->lock, flags); | 267 | raw_spin_unlock_irqrestore(&pp->lock, flags); |
| @@ -439,7 +439,7 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
| 439 | if (ret) | 439 | if (ret) |
| 440 | pci->num_viewport = 2; | 440 | pci->num_viewport = 2; |
| 441 | 441 | ||
| 442 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | 442 | if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_enabled()) { |
| 443 | /* | 443 | /* |
| 444 | * If a specific SoC driver needs to change the | 444 | * If a specific SoC driver needs to change the |
| 445 | * default number of vectors, it needs to implement | 445 | * default number of vectors, it needs to implement |
| @@ -512,8 +512,9 @@ error: | |||
| 512 | return ret; | 512 | return ret; |
| 513 | } | 513 | } |
| 514 | 514 | ||
| 515 | static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, | 515 | static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus, |
| 516 | u32 devfn, int where, int size, u32 *val) | 516 | u32 devfn, int where, int size, u32 *val, |
| 517 | bool write) | ||
| 517 | { | 518 | { |
| 518 | int ret, type; | 519 | int ret, type; |
| 519 | u32 busdev, cfg_size; | 520 | u32 busdev, cfg_size; |
| @@ -521,9 +522,6 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, | |||
| 521 | void __iomem *va_cfg_base; | 522 | void __iomem *va_cfg_base; |
| 522 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | 523 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
| 523 | 524 | ||
| 524 | if (pp->ops->rd_other_conf) | ||
| 525 | return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val); | ||
| 526 | |||
| 527 | busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | | 525 | busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | |
| 528 | PCIE_ATU_FUNC(PCI_FUNC(devfn)); | 526 | PCIE_ATU_FUNC(PCI_FUNC(devfn)); |
| 529 | 527 | ||
| @@ -542,7 +540,11 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, | |||
| 542 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, | 540 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, |
| 543 | type, cpu_addr, | 541 | type, cpu_addr, |
| 544 | busdev, cfg_size); | 542 | busdev, cfg_size); |
| 545 | ret = dw_pcie_read(va_cfg_base + where, size, val); | 543 | if (write) |
| 544 | ret = dw_pcie_write(va_cfg_base + where, size, *val); | ||
| 545 | else | ||
| 546 | ret = dw_pcie_read(va_cfg_base + where, size, val); | ||
| 547 | |||
| 546 | if (pci->num_viewport <= 2) | 548 | if (pci->num_viewport <= 2) |
| 547 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, | 549 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, |
| 548 | PCIE_ATU_TYPE_IO, pp->io_base, | 550 | PCIE_ATU_TYPE_IO, pp->io_base, |
| @@ -551,43 +553,26 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, | |||
| 551 | return ret; | 553 | return ret; |
| 552 | } | 554 | } |
| 553 | 555 | ||
| 556 | static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, | ||
| 557 | u32 devfn, int where, int size, u32 *val) | ||
| 558 | { | ||
| 559 | if (pp->ops->rd_other_conf) | ||
| 560 | return pp->ops->rd_other_conf(pp, bus, devfn, where, | ||
| 561 | size, val); | ||
| 562 | |||
| 563 | return dw_pcie_access_other_conf(pp, bus, devfn, where, size, val, | ||
| 564 | false); | ||
| 565 | } | ||
| 566 | |||
| 554 | static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, | 567 | static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, |
| 555 | u32 devfn, int where, int size, u32 val) | 568 | u32 devfn, int where, int size, u32 val) |
| 556 | { | 569 | { |
| 557 | int ret, type; | ||
| 558 | u32 busdev, cfg_size; | ||
| 559 | u64 cpu_addr; | ||
| 560 | void __iomem *va_cfg_base; | ||
| 561 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 562 | |||
| 563 | if (pp->ops->wr_other_conf) | 570 | if (pp->ops->wr_other_conf) |
| 564 | return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val); | 571 | return pp->ops->wr_other_conf(pp, bus, devfn, where, |
| 565 | 572 | size, val); | |
| 566 | busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | | ||
| 567 | PCIE_ATU_FUNC(PCI_FUNC(devfn)); | ||
| 568 | 573 | ||
| 569 | if (bus->parent->number == pp->root_bus_nr) { | 574 | return dw_pcie_access_other_conf(pp, bus, devfn, where, size, &val, |
| 570 | type = PCIE_ATU_TYPE_CFG0; | 575 | true); |
| 571 | cpu_addr = pp->cfg0_base; | ||
| 572 | cfg_size = pp->cfg0_size; | ||
| 573 | va_cfg_base = pp->va_cfg0_base; | ||
| 574 | } else { | ||
| 575 | type = PCIE_ATU_TYPE_CFG1; | ||
| 576 | cpu_addr = pp->cfg1_base; | ||
| 577 | cfg_size = pp->cfg1_size; | ||
| 578 | va_cfg_base = pp->va_cfg1_base; | ||
| 579 | } | ||
| 580 | |||
| 581 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, | ||
| 582 | type, cpu_addr, | ||
| 583 | busdev, cfg_size); | ||
| 584 | ret = dw_pcie_write(va_cfg_base + where, size, val); | ||
| 585 | if (pci->num_viewport <= 2) | ||
| 586 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, | ||
| 587 | PCIE_ATU_TYPE_IO, pp->io_base, | ||
| 588 | pp->io_bus_addr, pp->io_size); | ||
| 589 | |||
| 590 | return ret; | ||
| 591 | } | 576 | } |
| 592 | 577 | ||
| 593 | static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus, | 578 | static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus, |
| @@ -665,13 +650,13 @@ void dw_pcie_setup_rc(struct pcie_port *pp) | |||
| 665 | 650 | ||
| 666 | /* Initialize IRQ Status array */ | 651 | /* Initialize IRQ Status array */ |
| 667 | for (ctrl = 0; ctrl < num_ctrls; ctrl++) { | 652 | for (ctrl = 0; ctrl < num_ctrls; ctrl++) { |
| 653 | pp->irq_mask[ctrl] = ~0; | ||
| 668 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + | 654 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + |
| 669 | (ctrl * MSI_REG_CTRL_BLOCK_SIZE), | 655 | (ctrl * MSI_REG_CTRL_BLOCK_SIZE), |
| 670 | 4, ~0); | 656 | 4, pp->irq_mask[ctrl]); |
| 671 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + | 657 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + |
| 672 | (ctrl * MSI_REG_CTRL_BLOCK_SIZE), | 658 | (ctrl * MSI_REG_CTRL_BLOCK_SIZE), |
| 673 | 4, ~0); | 659 | 4, ~0); |
| 674 | pp->irq_status[ctrl] = 0; | ||
| 675 | } | 660 | } |
| 676 | 661 | ||
| 677 | /* Setup RC BARs */ | 662 | /* Setup RC BARs */ |
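Two behavioural points in the host changes above are easy to miss: MSI setup is now skipped when MSI has been globally disabled (pci_msi_enabled() returns false, e.g. with pci=nomsi), and the driver now caches the mask register itself (irq_mask[], bit set = vector masked) rather than caching irq_status and writing its complement. A toy model of the new bookkeeping, with a plain variable standing in for the PCIE_MSI_INTR0_MASK register write; the ex_ names are illustrative.

    #include <linux/bits.h>
    #include <linux/types.h>

    static u32 ex_msi_intr0_mask_reg;	/* stands in for PCIE_MSI_INTR0_MASK */
    static u32 ex_irq_mask = ~0U;	/* reset state: every vector masked */

    static void ex_msi_mask(unsigned int bit)
    {
            ex_irq_mask |= BIT(bit);		/* set = masked */
            ex_msi_intr0_mask_reg = ex_irq_mask;	/* written as-is, no inversion */
    }

    static void ex_msi_unmask(unsigned int bit)
    {
            ex_irq_mask &= ~BIT(bit);		/* clear = vector can fire */
            ex_msi_intr0_mask_reg = ex_irq_mask;
    }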
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 3be87126aef3..932dbd0b34b6 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
| @@ -13,11 +13,9 @@ | |||
| 13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
| 14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
| 15 | #include <linux/of_device.h> | 15 | #include <linux/of_device.h> |
| 16 | #include <linux/of_gpio.h> | ||
| 17 | #include <linux/pci.h> | 16 | #include <linux/pci.h> |
| 18 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
| 19 | #include <linux/resource.h> | 18 | #include <linux/resource.h> |
| 20 | #include <linux/signal.h> | ||
| 21 | #include <linux/types.h> | 19 | #include <linux/types.h> |
| 22 | #include <linux/regmap.h> | 20 | #include <linux/regmap.h> |
| 23 | 21 | ||
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 93ef8c31fb39..31f6331ca46f 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
| @@ -22,7 +22,7 @@ | |||
| 22 | 22 | ||
| 23 | int dw_pcie_read(void __iomem *addr, int size, u32 *val) | 23 | int dw_pcie_read(void __iomem *addr, int size, u32 *val) |
| 24 | { | 24 | { |
| 25 | if ((uintptr_t)addr & (size - 1)) { | 25 | if (!IS_ALIGNED((uintptr_t)addr, size)) { |
| 26 | *val = 0; | 26 | *val = 0; |
| 27 | return PCIBIOS_BAD_REGISTER_NUMBER; | 27 | return PCIBIOS_BAD_REGISTER_NUMBER; |
| 28 | } | 28 | } |
| @@ -43,7 +43,7 @@ int dw_pcie_read(void __iomem *addr, int size, u32 *val) | |||
| 43 | 43 | ||
| 44 | int dw_pcie_write(void __iomem *addr, int size, u32 val) | 44 | int dw_pcie_write(void __iomem *addr, int size, u32 val) |
| 45 | { | 45 | { |
| 46 | if ((uintptr_t)addr & (size - 1)) | 46 | if (!IS_ALIGNED((uintptr_t)addr, size)) |
| 47 | return PCIBIOS_BAD_REGISTER_NUMBER; | 47 | return PCIBIOS_BAD_REGISTER_NUMBER; |
| 48 | 48 | ||
| 49 | if (size == 4) | 49 | if (size == 4) |
| @@ -306,7 +306,7 @@ void dw_pcie_disable_atu(struct dw_pcie *pci, int index, | |||
| 306 | } | 306 | } |
| 307 | 307 | ||
| 308 | dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index); | 308 | dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index); |
| 309 | dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~PCIE_ATU_ENABLE); | 309 | dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, (u32)~PCIE_ATU_ENABLE); |
| 310 | } | 310 | } |
| 311 | 311 | ||
| 312 | int dw_pcie_wait_for_link(struct dw_pcie *pci) | 312 | int dw_pcie_wait_for_link(struct dw_pcie *pci) |
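The accessor change above is behaviour-preserving for the 1-, 2- and 4-byte sizes used here: IS_ALIGNED(x, a) expands to ((x) & ((a) - 1)) == 0, i.e. the old open-coded test with the sense inverted. A one-line illustration, with an ex_ prefix to mark it as a sketch rather than driver code:

    #include <linux/kernel.h>	/* IS_ALIGNED() */
    #include <linux/types.h>

    /* true only for naturally aligned 1-, 2- and 4-byte config accesses */
    static bool ex_cfg_access_aligned(uintptr_t addr, int size)
    {
            return IS_ALIGNED(addr, size);	/* same as !(addr & (size - 1)) */
    }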
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 1f56e6ae34ff..377f4c0b52da 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
| @@ -11,6 +11,7 @@ | |||
| 11 | #ifndef _PCIE_DESIGNWARE_H | 11 | #ifndef _PCIE_DESIGNWARE_H |
| 12 | #define _PCIE_DESIGNWARE_H | 12 | #define _PCIE_DESIGNWARE_H |
| 13 | 13 | ||
| 14 | #include <linux/bitfield.h> | ||
| 14 | #include <linux/dma-mapping.h> | 15 | #include <linux/dma-mapping.h> |
| 15 | #include <linux/irq.h> | 16 | #include <linux/irq.h> |
| 16 | #include <linux/msi.h> | 17 | #include <linux/msi.h> |
| @@ -30,23 +31,25 @@ | |||
| 30 | 31 | ||
| 31 | /* Synopsys-specific PCIe configuration registers */ | 32 | /* Synopsys-specific PCIe configuration registers */ |
| 32 | #define PCIE_PORT_LINK_CONTROL 0x710 | 33 | #define PCIE_PORT_LINK_CONTROL 0x710 |
| 33 | #define PORT_LINK_MODE_MASK (0x3f << 16) | 34 | #define PORT_LINK_MODE_MASK GENMASK(21, 16) |
| 34 | #define PORT_LINK_MODE_1_LANES (0x1 << 16) | 35 | #define PORT_LINK_MODE(n) FIELD_PREP(PORT_LINK_MODE_MASK, n) |
| 35 | #define PORT_LINK_MODE_2_LANES (0x3 << 16) | 36 | #define PORT_LINK_MODE_1_LANES PORT_LINK_MODE(0x1) |
| 36 | #define PORT_LINK_MODE_4_LANES (0x7 << 16) | 37 | #define PORT_LINK_MODE_2_LANES PORT_LINK_MODE(0x3) |
| 37 | #define PORT_LINK_MODE_8_LANES (0xf << 16) | 38 | #define PORT_LINK_MODE_4_LANES PORT_LINK_MODE(0x7) |
| 39 | #define PORT_LINK_MODE_8_LANES PORT_LINK_MODE(0xf) | ||
| 38 | 40 | ||
| 39 | #define PCIE_PORT_DEBUG0 0x728 | 41 | #define PCIE_PORT_DEBUG0 0x728 |
| 40 | #define PORT_LOGIC_LTSSM_STATE_MASK 0x1f | 42 | #define PORT_LOGIC_LTSSM_STATE_MASK 0x1f |
| 41 | #define PORT_LOGIC_LTSSM_STATE_L0 0x11 | 43 | #define PORT_LOGIC_LTSSM_STATE_L0 0x11 |
| 42 | 44 | ||
| 43 | #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C | 45 | #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C |
| 44 | #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) | 46 | #define PORT_LOGIC_SPEED_CHANGE BIT(17) |
| 45 | #define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8) | 47 | #define PORT_LOGIC_LINK_WIDTH_MASK GENMASK(12, 8) |
| 46 | #define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) | 48 | #define PORT_LOGIC_LINK_WIDTH(n) FIELD_PREP(PORT_LOGIC_LINK_WIDTH_MASK, n) |
| 47 | #define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) | 49 | #define PORT_LOGIC_LINK_WIDTH_1_LANES PORT_LOGIC_LINK_WIDTH(0x1) |
| 48 | #define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) | 50 | #define PORT_LOGIC_LINK_WIDTH_2_LANES PORT_LOGIC_LINK_WIDTH(0x2) |
| 49 | #define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8) | 51 | #define PORT_LOGIC_LINK_WIDTH_4_LANES PORT_LOGIC_LINK_WIDTH(0x4) |
| 52 | #define PORT_LOGIC_LINK_WIDTH_8_LANES PORT_LOGIC_LINK_WIDTH(0x8) | ||
| 50 | 53 | ||
| 51 | #define PCIE_MSI_ADDR_LO 0x820 | 54 | #define PCIE_MSI_ADDR_LO 0x820 |
| 52 | #define PCIE_MSI_ADDR_HI 0x824 | 55 | #define PCIE_MSI_ADDR_HI 0x824 |
| @@ -55,30 +58,30 @@ | |||
| 55 | #define PCIE_MSI_INTR0_STATUS 0x830 | 58 | #define PCIE_MSI_INTR0_STATUS 0x830 |
| 56 | 59 | ||
| 57 | #define PCIE_ATU_VIEWPORT 0x900 | 60 | #define PCIE_ATU_VIEWPORT 0x900 |
| 58 | #define PCIE_ATU_REGION_INBOUND (0x1 << 31) | 61 | #define PCIE_ATU_REGION_INBOUND BIT(31) |
| 59 | #define PCIE_ATU_REGION_OUTBOUND (0x0 << 31) | 62 | #define PCIE_ATU_REGION_OUTBOUND 0 |
| 60 | #define PCIE_ATU_REGION_INDEX2 (0x2 << 0) | 63 | #define PCIE_ATU_REGION_INDEX2 0x2 |
| 61 | #define PCIE_ATU_REGION_INDEX1 (0x1 << 0) | 64 | #define PCIE_ATU_REGION_INDEX1 0x1 |
| 62 | #define PCIE_ATU_REGION_INDEX0 (0x0 << 0) | 65 | #define PCIE_ATU_REGION_INDEX0 0x0 |
| 63 | #define PCIE_ATU_CR1 0x904 | 66 | #define PCIE_ATU_CR1 0x904 |
| 64 | #define PCIE_ATU_TYPE_MEM (0x0 << 0) | 67 | #define PCIE_ATU_TYPE_MEM 0x0 |
| 65 | #define PCIE_ATU_TYPE_IO (0x2 << 0) | 68 | #define PCIE_ATU_TYPE_IO 0x2 |
| 66 | #define PCIE_ATU_TYPE_CFG0 (0x4 << 0) | 69 | #define PCIE_ATU_TYPE_CFG0 0x4 |
| 67 | #define PCIE_ATU_TYPE_CFG1 (0x5 << 0) | 70 | #define PCIE_ATU_TYPE_CFG1 0x5 |
| 68 | #define PCIE_ATU_CR2 0x908 | 71 | #define PCIE_ATU_CR2 0x908 |
| 69 | #define PCIE_ATU_ENABLE (0x1 << 31) | 72 | #define PCIE_ATU_ENABLE BIT(31) |
| 70 | #define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30) | 73 | #define PCIE_ATU_BAR_MODE_ENABLE BIT(30) |
| 71 | #define PCIE_ATU_LOWER_BASE 0x90C | 74 | #define PCIE_ATU_LOWER_BASE 0x90C |
| 72 | #define PCIE_ATU_UPPER_BASE 0x910 | 75 | #define PCIE_ATU_UPPER_BASE 0x910 |
| 73 | #define PCIE_ATU_LIMIT 0x914 | 76 | #define PCIE_ATU_LIMIT 0x914 |
| 74 | #define PCIE_ATU_LOWER_TARGET 0x918 | 77 | #define PCIE_ATU_LOWER_TARGET 0x918 |
| 75 | #define PCIE_ATU_BUS(x) (((x) & 0xff) << 24) | 78 | #define PCIE_ATU_BUS(x) FIELD_PREP(GENMASK(31, 24), x) |
| 76 | #define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19) | 79 | #define PCIE_ATU_DEV(x) FIELD_PREP(GENMASK(23, 19), x) |
| 77 | #define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) | 80 | #define PCIE_ATU_FUNC(x) FIELD_PREP(GENMASK(18, 16), x) |
| 78 | #define PCIE_ATU_UPPER_TARGET 0x91C | 81 | #define PCIE_ATU_UPPER_TARGET 0x91C |
| 79 | 82 | ||
| 80 | #define PCIE_MISC_CONTROL_1_OFF 0x8BC | 83 | #define PCIE_MISC_CONTROL_1_OFF 0x8BC |
| 81 | #define PCIE_DBI_RO_WR_EN (0x1 << 0) | 84 | #define PCIE_DBI_RO_WR_EN BIT(0) |
| 82 | 85 | ||
| 83 | /* | 86 | /* |
| 84 | * iATU Unroll-specific register definitions | 87 | * iATU Unroll-specific register definitions |
| @@ -105,7 +108,7 @@ | |||
| 105 | ((region) << 9) | 108 | ((region) << 9) |
| 106 | 109 | ||
| 107 | #define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \ | 110 | #define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \ |
| 108 | (((region) << 9) | (0x1 << 8)) | 111 | (((region) << 9) | BIT(8)) |
| 109 | 112 | ||
| 110 | #define MAX_MSI_IRQS 256 | 113 | #define MAX_MSI_IRQS 256 |
| 111 | #define MAX_MSI_IRQS_PER_CTRL 32 | 114 | #define MAX_MSI_IRQS_PER_CTRL 32 |
| @@ -177,7 +180,7 @@ struct pcie_port { | |||
| 177 | struct irq_domain *msi_domain; | 180 | struct irq_domain *msi_domain; |
| 178 | dma_addr_t msi_data; | 181 | dma_addr_t msi_data; |
| 179 | u32 num_vectors; | 182 | u32 num_vectors; |
| 180 | u32 irq_status[MAX_MSI_CTRLS]; | 183 | u32 irq_mask[MAX_MSI_CTRLS]; |
| 181 | raw_spinlock_t lock; | 184 | raw_spinlock_t lock; |
| 182 | DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS); | 185 | DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS); |
| 183 | }; | 186 | }; |
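Aside, not taken from the patch: the FIELD_PREP()-based PCIE_ATU_BUS/DEV/FUNC macros above simply place a bus/devfn pair into the outbound iATU target register fields. A minimal sketch (illustrative helper name; masks copied from the hunk above):

    #include <linux/bitfield.h>
    #include <linux/pci.h>

    /* Illustrative only: mirrors PCIE_ATU_BUS/DEV/FUNC above. */
    static u32 example_atu_cfg_target(u8 bus, unsigned int devfn)
    {
            return FIELD_PREP(GENMASK(31, 24), bus) |               /* bus      */
                   FIELD_PREP(GENMASK(23, 19), PCI_SLOT(devfn)) |   /* device   */
                   FIELD_PREP(GENMASK(18, 16), PCI_FUNC(devfn));    /* function */
    }

Compared with the old open-coded shifts, FIELD_PREP() also gives a compile-time check that a constant value fits its mask.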
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index d185ea5fe996..a7f703556790 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c | |||
| @@ -1228,7 +1228,7 @@ static int qcom_pcie_probe(struct platform_device *pdev) | |||
| 1228 | 1228 | ||
| 1229 | pcie->ops = of_device_get_match_data(dev); | 1229 | pcie->ops = of_device_get_match_data(dev); |
| 1230 | 1230 | ||
| 1231 | pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW); | 1231 | pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH); |
| 1232 | if (IS_ERR(pcie->reset)) { | 1232 | if (IS_ERR(pcie->reset)) { |
| 1233 | ret = PTR_ERR(pcie->reset); | 1233 | ret = PTR_ERR(pcie->reset); |
| 1234 | goto err_pm_runtime_put; | 1234 | goto err_pm_runtime_put; |
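Context, not part of the patch: requesting "perst" with GPIOD_OUT_HIGH means the reset GPIO starts out logically asserted and stays that way until the driver releases it during link bring-up. A hypothetical sketch of the usual pulse, with an illustrative delay value:

    #include <linux/delay.h>
    #include <linux/gpio/consumer.h>

    /* Sketch only: "reset" is the descriptor requested in the hunk above. */
    static void example_perst_pulse(struct gpio_desc *reset)
    {
            gpiod_set_value_cansleep(reset, 1);     /* assert PERST#            */
            usleep_range(1000, 1500);               /* illustrative delay only  */
            gpiod_set_value_cansleep(reset, 0);     /* deassert, link may train */
    }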
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c index 7d05e51205b3..27edcebd1726 100644 --- a/drivers/pci/controller/pcie-altera.c +++ b/drivers/pci/controller/pcie-altera.c | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include <linux/irqchip/chained_irq.h> | 11 | #include <linux/irqchip/chained_irq.h> |
| 12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
| 13 | #include <linux/of_address.h> | 13 | #include <linux/of_address.h> |
| 14 | #include <linux/of_device.h> | ||
| 14 | #include <linux/of_irq.h> | 15 | #include <linux/of_irq.h> |
| 15 | #include <linux/of_pci.h> | 16 | #include <linux/of_pci.h> |
| 16 | #include <linux/pci.h> | 17 | #include <linux/pci.h> |
| @@ -37,7 +38,12 @@ | |||
| 37 | #define RP_LTSSM_MASK 0x1f | 38 | #define RP_LTSSM_MASK 0x1f |
| 38 | #define LTSSM_L0 0xf | 39 | #define LTSSM_L0 0xf |
| 39 | 40 | ||
| 40 | #define PCIE_CAP_OFFSET 0x80 | 41 | #define S10_RP_TX_CNTRL 0x2004 |
| 42 | #define S10_RP_RXCPL_REG 0x2008 | ||
| 43 | #define S10_RP_RXCPL_STATUS 0x200C | ||
| 44 | #define S10_RP_CFG_ADDR(pcie, reg) \ | ||
| 45 | (((pcie)->hip_base) + (reg) + (1 << 20)) | ||
| 46 | |||
| 41 | /* TLP configuration type 0 and 1 */ | 47 | /* TLP configuration type 0 and 1 */ |
| 42 | #define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */ | 48 | #define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */ |
| 43 | #define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */ | 49 | #define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */ |
| @@ -49,18 +55,19 @@ | |||
| 49 | #define RP_DEVFN 0 | 55 | #define RP_DEVFN 0 |
| 50 | #define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) | 56 | #define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) |
| 51 | #define TLP_CFGRD_DW0(pcie, bus) \ | 57 | #define TLP_CFGRD_DW0(pcie, bus) \ |
| 52 | ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGRD0 \ | 58 | ((((bus == pcie->root_bus_nr) ? pcie->pcie_data->cfgrd0 \ |
| 53 | : TLP_FMTTYPE_CFGRD1) << 24) | \ | 59 | : pcie->pcie_data->cfgrd1) << 24) | \ |
| 54 | TLP_PAYLOAD_SIZE) | 60 | TLP_PAYLOAD_SIZE) |
| 55 | #define TLP_CFGWR_DW0(pcie, bus) \ | 61 | #define TLP_CFGWR_DW0(pcie, bus) \ |
| 56 | ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGWR0 \ | 62 | ((((bus == pcie->root_bus_nr) ? pcie->pcie_data->cfgwr0 \ |
| 57 | : TLP_FMTTYPE_CFGWR1) << 24) | \ | 63 | : pcie->pcie_data->cfgwr1) << 24) | \ |
| 58 | TLP_PAYLOAD_SIZE) | 64 | TLP_PAYLOAD_SIZE) |
| 59 | #define TLP_CFG_DW1(pcie, tag, be) \ | 65 | #define TLP_CFG_DW1(pcie, tag, be) \ |
| 60 | (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be)) | 66 | (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be)) |
| 61 | #define TLP_CFG_DW2(bus, devfn, offset) \ | 67 | #define TLP_CFG_DW2(bus, devfn, offset) \ |
| 62 | (((bus) << 24) | ((devfn) << 16) | (offset)) | 68 | (((bus) << 24) | ((devfn) << 16) | (offset)) |
| 63 | #define TLP_COMP_STATUS(s) (((s) >> 13) & 7) | 69 | #define TLP_COMP_STATUS(s) (((s) >> 13) & 7) |
| 70 | #define TLP_BYTE_COUNT(s) (((s) >> 0) & 0xfff) | ||
| 64 | #define TLP_HDR_SIZE 3 | 71 | #define TLP_HDR_SIZE 3 |
| 65 | #define TLP_LOOP 500 | 72 | #define TLP_LOOP 500 |
| 66 | 73 | ||
| @@ -69,14 +76,47 @@ | |||
| 69 | 76 | ||
| 70 | #define DWORD_MASK 3 | 77 | #define DWORD_MASK 3 |
| 71 | 78 | ||
| 79 | #define S10_TLP_FMTTYPE_CFGRD0 0x05 | ||
| 80 | #define S10_TLP_FMTTYPE_CFGRD1 0x04 | ||
| 81 | #define S10_TLP_FMTTYPE_CFGWR0 0x45 | ||
| 82 | #define S10_TLP_FMTTYPE_CFGWR1 0x44 | ||
| 83 | |||
| 84 | enum altera_pcie_version { | ||
| 85 | ALTERA_PCIE_V1 = 0, | ||
| 86 | ALTERA_PCIE_V2, | ||
| 87 | }; | ||
| 88 | |||
| 72 | struct altera_pcie { | 89 | struct altera_pcie { |
| 73 | struct platform_device *pdev; | 90 | struct platform_device *pdev; |
| 74 | void __iomem *cra_base; /* DT Cra */ | 91 | void __iomem *cra_base; |
| 92 | void __iomem *hip_base; | ||
| 75 | int irq; | 93 | int irq; |
| 76 | u8 root_bus_nr; | 94 | u8 root_bus_nr; |
| 77 | struct irq_domain *irq_domain; | 95 | struct irq_domain *irq_domain; |
| 78 | struct resource bus_range; | 96 | struct resource bus_range; |
| 79 | struct list_head resources; | 97 | struct list_head resources; |
| 98 | const struct altera_pcie_data *pcie_data; | ||
| 99 | }; | ||
| 100 | |||
| 101 | struct altera_pcie_ops { | ||
| 102 | int (*tlp_read_pkt)(struct altera_pcie *pcie, u32 *value); | ||
| 103 | void (*tlp_write_pkt)(struct altera_pcie *pcie, u32 *headers, | ||
| 104 | u32 data, bool align); | ||
| 105 | bool (*get_link_status)(struct altera_pcie *pcie); | ||
| 106 | int (*rp_read_cfg)(struct altera_pcie *pcie, int where, | ||
| 107 | int size, u32 *value); | ||
| 108 | int (*rp_write_cfg)(struct altera_pcie *pcie, u8 busno, | ||
| 109 | int where, int size, u32 value); | ||
| 110 | }; | ||
| 111 | |||
| 112 | struct altera_pcie_data { | ||
| 113 | const struct altera_pcie_ops *ops; | ||
| 114 | enum altera_pcie_version version; | ||
| 115 | u32 cap_offset; /* PCIe capability structure register offset */ | ||
| 116 | u32 cfgrd0; | ||
| 117 | u32 cfgrd1; | ||
| 118 | u32 cfgwr0; | ||
| 119 | u32 cfgwr1; | ||
| 80 | }; | 120 | }; |
| 81 | 121 | ||
| 82 | struct tlp_rp_regpair_t { | 122 | struct tlp_rp_regpair_t { |
| @@ -101,6 +141,15 @@ static bool altera_pcie_link_up(struct altera_pcie *pcie) | |||
| 101 | return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0); | 141 | return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0); |
| 102 | } | 142 | } |
| 103 | 143 | ||
| 144 | static bool s10_altera_pcie_link_up(struct altera_pcie *pcie) | ||
| 145 | { | ||
| 146 | void __iomem *addr = S10_RP_CFG_ADDR(pcie, | ||
| 147 | pcie->pcie_data->cap_offset + | ||
| 148 | PCI_EXP_LNKSTA); | ||
| 149 | |||
| 150 | return !!(readw(addr) & PCI_EXP_LNKSTA_DLLLA); | ||
| 151 | } | ||
| 152 | |||
| 104 | /* | 153 | /* |
| 105 | * Altera PCIe port uses BAR0 of RC's configuration space as the translation | 154 | * Altera PCIe port uses BAR0 of RC's configuration space as the translation |
| 106 | * from PCI bus to native BUS. Entire DDR region is mapped into PCIe space | 155 | * from PCI bus to native BUS. Entire DDR region is mapped into PCIe space |
| @@ -128,12 +177,18 @@ static void tlp_write_tx(struct altera_pcie *pcie, | |||
| 128 | cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL); | 177 | cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL); |
| 129 | } | 178 | } |
| 130 | 179 | ||
| 180 | static void s10_tlp_write_tx(struct altera_pcie *pcie, u32 reg0, u32 ctrl) | ||
| 181 | { | ||
| 182 | cra_writel(pcie, reg0, RP_TX_REG0); | ||
| 183 | cra_writel(pcie, ctrl, S10_RP_TX_CNTRL); | ||
| 184 | } | ||
| 185 | |||
| 131 | static bool altera_pcie_valid_device(struct altera_pcie *pcie, | 186 | static bool altera_pcie_valid_device(struct altera_pcie *pcie, |
| 132 | struct pci_bus *bus, int dev) | 187 | struct pci_bus *bus, int dev) |
| 133 | { | 188 | { |
| 134 | /* If there is no link, then there is no device */ | 189 | /* If there is no link, then there is no device */ |
| 135 | if (bus->number != pcie->root_bus_nr) { | 190 | if (bus->number != pcie->root_bus_nr) { |
| 136 | if (!altera_pcie_link_up(pcie)) | 191 | if (!pcie->pcie_data->ops->get_link_status(pcie)) |
| 137 | return false; | 192 | return false; |
| 138 | } | 193 | } |
| 139 | 194 | ||
| @@ -183,6 +238,53 @@ static int tlp_read_packet(struct altera_pcie *pcie, u32 *value) | |||
| 183 | return PCIBIOS_DEVICE_NOT_FOUND; | 238 | return PCIBIOS_DEVICE_NOT_FOUND; |
| 184 | } | 239 | } |
| 185 | 240 | ||
| 241 | static int s10_tlp_read_packet(struct altera_pcie *pcie, u32 *value) | ||
| 242 | { | ||
| 243 | u32 ctrl; | ||
| 244 | u32 comp_status; | ||
| 245 | u32 dw[4]; | ||
| 246 | u32 count; | ||
| 247 | struct device *dev = &pcie->pdev->dev; | ||
| 248 | |||
| 249 | for (count = 0; count < TLP_LOOP; count++) { | ||
| 250 | ctrl = cra_readl(pcie, S10_RP_RXCPL_STATUS); | ||
| 251 | if (ctrl & RP_RXCPL_SOP) { | ||
| 252 | /* Read first DW */ | ||
| 253 | dw[0] = cra_readl(pcie, S10_RP_RXCPL_REG); | ||
| 254 | break; | ||
| 255 | } | ||
| 256 | |||
| 257 | udelay(5); | ||
| 258 | } | ||
| 259 | |||
| 260 | /* SOP detection failed, return error */ | ||
| 261 | if (count == TLP_LOOP) | ||
| 262 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 263 | |||
| 264 | count = 1; | ||
| 265 | |||
| 266 | /* Poll for EOP */ | ||
| 267 | while (count < ARRAY_SIZE(dw)) { | ||
| 268 | ctrl = cra_readl(pcie, S10_RP_RXCPL_STATUS); | ||
| 269 | dw[count++] = cra_readl(pcie, S10_RP_RXCPL_REG); | ||
| 270 | if (ctrl & RP_RXCPL_EOP) { | ||
| 271 | comp_status = TLP_COMP_STATUS(dw[1]); | ||
| 272 | if (comp_status) | ||
| 273 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 274 | |||
| 275 | if (value && TLP_BYTE_COUNT(dw[1]) == sizeof(u32) && | ||
| 276 | count == 4) | ||
| 277 | *value = dw[3]; | ||
| 278 | |||
| 279 | return PCIBIOS_SUCCESSFUL; | ||
| 280 | } | ||
| 281 | } | ||
| 282 | |||
| 283 | dev_warn(dev, "Malformed TLP packet\n"); | ||
| 284 | |||
| 285 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 286 | } | ||
| 287 | |||
| 186 | static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers, | 288 | static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers, |
| 187 | u32 data, bool align) | 289 | u32 data, bool align) |
| 188 | { | 290 | { |
| @@ -210,6 +312,15 @@ static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers, | |||
| 210 | tlp_write_tx(pcie, &tlp_rp_regdata); | 312 | tlp_write_tx(pcie, &tlp_rp_regdata); |
| 211 | } | 313 | } |
| 212 | 314 | ||
| 315 | static void s10_tlp_write_packet(struct altera_pcie *pcie, u32 *headers, | ||
| 316 | u32 data, bool dummy) | ||
| 317 | { | ||
| 318 | s10_tlp_write_tx(pcie, headers[0], RP_TX_SOP); | ||
| 319 | s10_tlp_write_tx(pcie, headers[1], 0); | ||
| 320 | s10_tlp_write_tx(pcie, headers[2], 0); | ||
| 321 | s10_tlp_write_tx(pcie, data, RP_TX_EOP); | ||
| 322 | } | ||
| 323 | |||
| 213 | static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn, | 324 | static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn, |
| 214 | int where, u8 byte_en, u32 *value) | 325 | int where, u8 byte_en, u32 *value) |
| 215 | { | 326 | { |
| @@ -219,9 +330,9 @@ static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn, | |||
| 219 | headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en); | 330 | headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en); |
| 220 | headers[2] = TLP_CFG_DW2(bus, devfn, where); | 331 | headers[2] = TLP_CFG_DW2(bus, devfn, where); |
| 221 | 332 | ||
| 222 | tlp_write_packet(pcie, headers, 0, false); | 333 | pcie->pcie_data->ops->tlp_write_pkt(pcie, headers, 0, false); |
| 223 | 334 | ||
| 224 | return tlp_read_packet(pcie, value); | 335 | return pcie->pcie_data->ops->tlp_read_pkt(pcie, value); |
| 225 | } | 336 | } |
| 226 | 337 | ||
| 227 | static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, | 338 | static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, |
| @@ -236,11 +347,13 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, | |||
| 236 | 347 | ||
| 237 | /* check alignment to Qword */ | 348 | /* check alignment to Qword */ |
| 238 | if ((where & 0x7) == 0) | 349 | if ((where & 0x7) == 0) |
| 239 | tlp_write_packet(pcie, headers, value, true); | 350 | pcie->pcie_data->ops->tlp_write_pkt(pcie, headers, |
| 351 | value, true); | ||
| 240 | else | 352 | else |
| 241 | tlp_write_packet(pcie, headers, value, false); | 353 | pcie->pcie_data->ops->tlp_write_pkt(pcie, headers, |
| 354 | value, false); | ||
| 242 | 355 | ||
| 243 | ret = tlp_read_packet(pcie, NULL); | 356 | ret = pcie->pcie_data->ops->tlp_read_pkt(pcie, NULL); |
| 244 | if (ret != PCIBIOS_SUCCESSFUL) | 357 | if (ret != PCIBIOS_SUCCESSFUL) |
| 245 | return ret; | 358 | return ret; |
| 246 | 359 | ||
| @@ -254,6 +367,53 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, | |||
| 254 | return PCIBIOS_SUCCESSFUL; | 367 | return PCIBIOS_SUCCESSFUL; |
| 255 | } | 368 | } |
| 256 | 369 | ||
| 370 | static int s10_rp_read_cfg(struct altera_pcie *pcie, int where, | ||
| 371 | int size, u32 *value) | ||
| 372 | { | ||
| 373 | void __iomem *addr = S10_RP_CFG_ADDR(pcie, where); | ||
| 374 | |||
| 375 | switch (size) { | ||
| 376 | case 1: | ||
| 377 | *value = readb(addr); | ||
| 378 | break; | ||
| 379 | case 2: | ||
| 380 | *value = readw(addr); | ||
| 381 | break; | ||
| 382 | default: | ||
| 383 | *value = readl(addr); | ||
| 384 | break; | ||
| 385 | } | ||
| 386 | |||
| 387 | return PCIBIOS_SUCCESSFUL; | ||
| 388 | } | ||
| 389 | |||
| 390 | static int s10_rp_write_cfg(struct altera_pcie *pcie, u8 busno, | ||
| 391 | int where, int size, u32 value) | ||
| 392 | { | ||
| 393 | void __iomem *addr = S10_RP_CFG_ADDR(pcie, where); | ||
| 394 | |||
| 395 | switch (size) { | ||
| 396 | case 1: | ||
| 397 | writeb(value, addr); | ||
| 398 | break; | ||
| 399 | case 2: | ||
| 400 | writew(value, addr); | ||
| 401 | break; | ||
| 402 | default: | ||
| 403 | writel(value, addr); | ||
| 404 | break; | ||
| 405 | } | ||
| 406 | |||
| 407 | /* | ||
| 408 | * Monitor changes to PCI_PRIMARY_BUS register on root port | ||
| 409 | * and update local copy of root bus number accordingly. | ||
| 410 | */ | ||
| 411 | if (busno == pcie->root_bus_nr && where == PCI_PRIMARY_BUS) | ||
| 412 | pcie->root_bus_nr = value & 0xff; | ||
| 413 | |||
| 414 | return PCIBIOS_SUCCESSFUL; | ||
| 415 | } | ||
| 416 | |||
| 257 | static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno, | 417 | static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno, |
| 258 | unsigned int devfn, int where, int size, | 418 | unsigned int devfn, int where, int size, |
| 259 | u32 *value) | 419 | u32 *value) |
| @@ -262,6 +422,10 @@ static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno, | |||
| 262 | u32 data; | 422 | u32 data; |
| 263 | u8 byte_en; | 423 | u8 byte_en; |
| 264 | 424 | ||
| 425 | if (busno == pcie->root_bus_nr && pcie->pcie_data->ops->rp_read_cfg) | ||
| 426 | return pcie->pcie_data->ops->rp_read_cfg(pcie, where, | ||
| 427 | size, value); | ||
| 428 | |||
| 265 | switch (size) { | 429 | switch (size) { |
| 266 | case 1: | 430 | case 1: |
| 267 | byte_en = 1 << (where & 3); | 431 | byte_en = 1 << (where & 3); |
| @@ -302,6 +466,10 @@ static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno, | |||
| 302 | u32 shift = 8 * (where & 3); | 466 | u32 shift = 8 * (where & 3); |
| 303 | u8 byte_en; | 467 | u8 byte_en; |
| 304 | 468 | ||
| 469 | if (busno == pcie->root_bus_nr && pcie->pcie_data->ops->rp_write_cfg) | ||
| 470 | return pcie->pcie_data->ops->rp_write_cfg(pcie, busno, | ||
| 471 | where, size, value); | ||
| 472 | |||
| 305 | switch (size) { | 473 | switch (size) { |
| 306 | case 1: | 474 | case 1: |
| 307 | data32 = (value & 0xff) << shift; | 475 | data32 = (value & 0xff) << shift; |
| @@ -365,7 +533,8 @@ static int altera_read_cap_word(struct altera_pcie *pcie, u8 busno, | |||
| 365 | int ret; | 533 | int ret; |
| 366 | 534 | ||
| 367 | ret = _altera_pcie_cfg_read(pcie, busno, devfn, | 535 | ret = _altera_pcie_cfg_read(pcie, busno, devfn, |
| 368 | PCIE_CAP_OFFSET + offset, sizeof(*value), | 536 | pcie->pcie_data->cap_offset + offset, |
| 537 | sizeof(*value), | ||
| 369 | &data); | 538 | &data); |
| 370 | *value = data; | 539 | *value = data; |
| 371 | return ret; | 540 | return ret; |
| @@ -375,7 +544,8 @@ static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno, | |||
| 375 | unsigned int devfn, int offset, u16 value) | 544 | unsigned int devfn, int offset, u16 value) |
| 376 | { | 545 | { |
| 377 | return _altera_pcie_cfg_write(pcie, busno, devfn, | 546 | return _altera_pcie_cfg_write(pcie, busno, devfn, |
| 378 | PCIE_CAP_OFFSET + offset, sizeof(value), | 547 | pcie->pcie_data->cap_offset + offset, |
| 548 | sizeof(value), | ||
| 379 | value); | 549 | value); |
| 380 | } | 550 | } |
| 381 | 551 | ||
| @@ -403,7 +573,7 @@ static void altera_wait_link_retrain(struct altera_pcie *pcie) | |||
| 403 | /* Wait for link is up */ | 573 | /* Wait for link is up */ |
| 404 | start_jiffies = jiffies; | 574 | start_jiffies = jiffies; |
| 405 | for (;;) { | 575 | for (;;) { |
| 406 | if (altera_pcie_link_up(pcie)) | 576 | if (pcie->pcie_data->ops->get_link_status(pcie)) |
| 407 | break; | 577 | break; |
| 408 | 578 | ||
| 409 | if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) { | 579 | if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) { |
| @@ -418,7 +588,7 @@ static void altera_pcie_retrain(struct altera_pcie *pcie) | |||
| 418 | { | 588 | { |
| 419 | u16 linkcap, linkstat, linkctl; | 589 | u16 linkcap, linkstat, linkctl; |
| 420 | 590 | ||
| 421 | if (!altera_pcie_link_up(pcie)) | 591 | if (!pcie->pcie_data->ops->get_link_status(pcie)) |
| 422 | return; | 592 | return; |
| 423 | 593 | ||
| 424 | /* | 594 | /* |
| @@ -540,12 +710,20 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie) | |||
| 540 | struct device *dev = &pcie->pdev->dev; | 710 | struct device *dev = &pcie->pdev->dev; |
| 541 | struct platform_device *pdev = pcie->pdev; | 711 | struct platform_device *pdev = pcie->pdev; |
| 542 | struct resource *cra; | 712 | struct resource *cra; |
| 713 | struct resource *hip; | ||
| 543 | 714 | ||
| 544 | cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra"); | 715 | cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra"); |
| 545 | pcie->cra_base = devm_ioremap_resource(dev, cra); | 716 | pcie->cra_base = devm_ioremap_resource(dev, cra); |
| 546 | if (IS_ERR(pcie->cra_base)) | 717 | if (IS_ERR(pcie->cra_base)) |
| 547 | return PTR_ERR(pcie->cra_base); | 718 | return PTR_ERR(pcie->cra_base); |
| 548 | 719 | ||
| 720 | if (pcie->pcie_data->version == ALTERA_PCIE_V2) { | ||
| 721 | hip = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Hip"); | ||
| 722 | pcie->hip_base = devm_ioremap_resource(&pdev->dev, hip); | ||
| 723 | if (IS_ERR(pcie->hip_base)) | ||
| 724 | return PTR_ERR(pcie->hip_base); | ||
| 725 | } | ||
| 726 | |||
| 549 | /* setup IRQ */ | 727 | /* setup IRQ */ |
| 550 | pcie->irq = platform_get_irq(pdev, 0); | 728 | pcie->irq = platform_get_irq(pdev, 0); |
| 551 | if (pcie->irq < 0) { | 729 | if (pcie->irq < 0) { |
| @@ -562,6 +740,48 @@ static void altera_pcie_host_init(struct altera_pcie *pcie) | |||
| 562 | altera_pcie_retrain(pcie); | 740 | altera_pcie_retrain(pcie); |
| 563 | } | 741 | } |
| 564 | 742 | ||
| 743 | static const struct altera_pcie_ops altera_pcie_ops_1_0 = { | ||
| 744 | .tlp_read_pkt = tlp_read_packet, | ||
| 745 | .tlp_write_pkt = tlp_write_packet, | ||
| 746 | .get_link_status = altera_pcie_link_up, | ||
| 747 | }; | ||
| 748 | |||
| 749 | static const struct altera_pcie_ops altera_pcie_ops_2_0 = { | ||
| 750 | .tlp_read_pkt = s10_tlp_read_packet, | ||
| 751 | .tlp_write_pkt = s10_tlp_write_packet, | ||
| 752 | .get_link_status = s10_altera_pcie_link_up, | ||
| 753 | .rp_read_cfg = s10_rp_read_cfg, | ||
| 754 | .rp_write_cfg = s10_rp_write_cfg, | ||
| 755 | }; | ||
| 756 | |||
| 757 | static const struct altera_pcie_data altera_pcie_1_0_data = { | ||
| 758 | .ops = &altera_pcie_ops_1_0, | ||
| 759 | .cap_offset = 0x80, | ||
| 760 | .version = ALTERA_PCIE_V1, | ||
| 761 | .cfgrd0 = TLP_FMTTYPE_CFGRD0, | ||
| 762 | .cfgrd1 = TLP_FMTTYPE_CFGRD1, | ||
| 763 | .cfgwr0 = TLP_FMTTYPE_CFGWR0, | ||
| 764 | .cfgwr1 = TLP_FMTTYPE_CFGWR1, | ||
| 765 | }; | ||
| 766 | |||
| 767 | static const struct altera_pcie_data altera_pcie_2_0_data = { | ||
| 768 | .ops = &altera_pcie_ops_2_0, | ||
| 769 | .version = ALTERA_PCIE_V2, | ||
| 770 | .cap_offset = 0x70, | ||
| 771 | .cfgrd0 = S10_TLP_FMTTYPE_CFGRD0, | ||
| 772 | .cfgrd1 = S10_TLP_FMTTYPE_CFGRD1, | ||
| 773 | .cfgwr0 = S10_TLP_FMTTYPE_CFGWR0, | ||
| 774 | .cfgwr1 = S10_TLP_FMTTYPE_CFGWR1, | ||
| 775 | }; | ||
| 776 | |||
| 777 | static const struct of_device_id altera_pcie_of_match[] = { | ||
| 778 | {.compatible = "altr,pcie-root-port-1.0", | ||
| 779 | .data = &altera_pcie_1_0_data }, | ||
| 780 | {.compatible = "altr,pcie-root-port-2.0", | ||
| 781 | .data = &altera_pcie_2_0_data }, | ||
| 782 | {}, | ||
| 783 | }; | ||
| 784 | |||
| 565 | static int altera_pcie_probe(struct platform_device *pdev) | 785 | static int altera_pcie_probe(struct platform_device *pdev) |
| 566 | { | 786 | { |
| 567 | struct device *dev = &pdev->dev; | 787 | struct device *dev = &pdev->dev; |
| @@ -570,6 +790,7 @@ static int altera_pcie_probe(struct platform_device *pdev) | |||
| 570 | struct pci_bus *child; | 790 | struct pci_bus *child; |
| 571 | struct pci_host_bridge *bridge; | 791 | struct pci_host_bridge *bridge; |
| 572 | int ret; | 792 | int ret; |
| 793 | const struct of_device_id *match; | ||
| 573 | 794 | ||
| 574 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); | 795 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); |
| 575 | if (!bridge) | 796 | if (!bridge) |
| @@ -578,6 +799,12 @@ static int altera_pcie_probe(struct platform_device *pdev) | |||
| 578 | pcie = pci_host_bridge_priv(bridge); | 799 | pcie = pci_host_bridge_priv(bridge); |
| 579 | pcie->pdev = pdev; | 800 | pcie->pdev = pdev; |
| 580 | 801 | ||
| 802 | match = of_match_device(altera_pcie_of_match, &pdev->dev); | ||
| 803 | if (!match) | ||
| 804 | return -ENODEV; | ||
| 805 | |||
| 806 | pcie->pcie_data = match->data; | ||
| 807 | |||
| 581 | ret = altera_pcie_parse_dt(pcie); | 808 | ret = altera_pcie_parse_dt(pcie); |
| 582 | if (ret) { | 809 | if (ret) { |
| 583 | dev_err(dev, "Parsing DT failed\n"); | 810 | dev_err(dev, "Parsing DT failed\n"); |
| @@ -628,11 +855,6 @@ static int altera_pcie_probe(struct platform_device *pdev) | |||
| 628 | return ret; | 855 | return ret; |
| 629 | } | 856 | } |
| 630 | 857 | ||
| 631 | static const struct of_device_id altera_pcie_of_match[] = { | ||
| 632 | { .compatible = "altr,pcie-root-port-1.0", }, | ||
| 633 | {}, | ||
| 634 | }; | ||
| 635 | |||
| 636 | static struct platform_driver altera_pcie_driver = { | 858 | static struct platform_driver altera_pcie_driver = { |
| 637 | .probe = altera_pcie_probe, | 859 | .probe = altera_pcie_probe, |
| 638 | .driver = { | 860 | .driver = { |
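Aside, not from the patch: since probe() only needs match->data, the same lookup could also be written with of_device_get_match_data(). A sketch of the equivalent fragment that would replace the of_match_device() block in altera_pcie_probe() above:

    /* Sketch only; NULL means no matching compatible entry was found. */
    pcie->pcie_data = of_device_get_match_data(&pdev->dev);
    if (!pcie->pcie_data)
            return -ENODEV;

Either variant relies on the <linux/of_device.h> include this patch adds.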
diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h index b89f850c3a4e..e90a4ebf6550 100644 --- a/drivers/pci/hotplug/ibmphp.h +++ b/drivers/pci/hotplug/ibmphp.h | |||
| @@ -378,7 +378,6 @@ int ibmphp_add_pfmem_from_mem(struct resource_node *); | |||
| 378 | struct bus_node *ibmphp_find_res_bus(u8); | 378 | struct bus_node *ibmphp_find_res_bus(u8); |
| 379 | void ibmphp_print_test(void); /* for debugging purposes */ | 379 | void ibmphp_print_test(void); /* for debugging purposes */ |
| 380 | 380 | ||
| 381 | void ibmphp_hpc_initvars(void); | ||
| 382 | int ibmphp_hpc_readslot(struct slot *, u8, u8 *); | 381 | int ibmphp_hpc_readslot(struct slot *, u8, u8 *); |
| 383 | int ibmphp_hpc_writeslot(struct slot *, u8); | 382 | int ibmphp_hpc_writeslot(struct slot *, u8); |
| 384 | void ibmphp_lock_operations(void); | 383 | void ibmphp_lock_operations(void); |
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c index 08a58e911fc2..17124254d897 100644 --- a/drivers/pci/hotplug/ibmphp_core.c +++ b/drivers/pci/hotplug/ibmphp_core.c | |||
| @@ -1277,8 +1277,6 @@ static int __init ibmphp_init(void) | |||
| 1277 | 1277 | ||
| 1278 | ibmphp_debug = debug; | 1278 | ibmphp_debug = debug; |
| 1279 | 1279 | ||
| 1280 | ibmphp_hpc_initvars(); | ||
| 1281 | |||
| 1282 | for (i = 0; i < 16; i++) | 1280 | for (i = 0; i < 16; i++) |
| 1283 | irqs[i] = 0; | 1281 | irqs[i] = 0; |
| 1284 | 1282 | ||
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c index 752c384cbd4c..508a62a6b5f9 100644 --- a/drivers/pci/hotplug/ibmphp_hpc.c +++ b/drivers/pci/hotplug/ibmphp_hpc.c | |||
| @@ -15,13 +15,13 @@ | |||
| 15 | 15 | ||
| 16 | #include <linux/wait.h> | 16 | #include <linux/wait.h> |
| 17 | #include <linux/time.h> | 17 | #include <linux/time.h> |
| 18 | #include <linux/completion.h> | ||
| 18 | #include <linux/delay.h> | 19 | #include <linux/delay.h> |
| 19 | #include <linux/module.h> | 20 | #include <linux/module.h> |
| 20 | #include <linux/pci.h> | 21 | #include <linux/pci.h> |
| 21 | #include <linux/init.h> | 22 | #include <linux/init.h> |
| 22 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
| 23 | #include <linux/sched.h> | 24 | #include <linux/sched.h> |
| 24 | #include <linux/semaphore.h> | ||
| 25 | #include <linux/kthread.h> | 25 | #include <linux/kthread.h> |
| 26 | #include "ibmphp.h" | 26 | #include "ibmphp.h" |
| 27 | 27 | ||
| @@ -88,10 +88,10 @@ static int to_debug = 0; | |||
| 88 | //---------------------------------------------------------------------------- | 88 | //---------------------------------------------------------------------------- |
| 89 | // global variables | 89 | // global variables |
| 90 | //---------------------------------------------------------------------------- | 90 | //---------------------------------------------------------------------------- |
| 91 | static struct mutex sem_hpcaccess; // lock access to HPC | 91 | static DEFINE_MUTEX(sem_hpcaccess); // lock access to HPC |
| 92 | static struct semaphore semOperations; // lock all operations and | 92 | static DEFINE_MUTEX(operations_mutex); // lock all operations and |
| 93 | // access to data structures | 93 | // access to data structures |
| 94 | static struct semaphore sem_exit; // make sure polling thread goes away | 94 | static DECLARE_COMPLETION(exit_complete); // make sure polling thread goes away |
| 95 | static struct task_struct *ibmphp_poll_thread; | 95 | static struct task_struct *ibmphp_poll_thread; |
| 96 | //---------------------------------------------------------------------------- | 96 | //---------------------------------------------------------------------------- |
| 97 | // local function prototypes | 97 | // local function prototypes |
| @@ -110,23 +110,6 @@ static int hpc_wait_ctlr_notworking(int, struct controller *, void __iomem *, u8 | |||
| 110 | 110 | ||
| 111 | 111 | ||
| 112 | /*---------------------------------------------------------------------- | 112 | /*---------------------------------------------------------------------- |
| 113 | * Name: ibmphp_hpc_initvars | ||
| 114 | * | ||
| 115 | * Action: initialize semaphores and variables | ||
| 116 | *---------------------------------------------------------------------*/ | ||
| 117 | void __init ibmphp_hpc_initvars(void) | ||
| 118 | { | ||
| 119 | debug("%s - Entry\n", __func__); | ||
| 120 | |||
| 121 | mutex_init(&sem_hpcaccess); | ||
| 122 | sema_init(&semOperations, 1); | ||
| 123 | sema_init(&sem_exit, 0); | ||
| 124 | to_debug = 0; | ||
| 125 | |||
| 126 | debug("%s - Exit\n", __func__); | ||
| 127 | } | ||
| 128 | |||
| 129 | /*---------------------------------------------------------------------- | ||
| 130 | * Name: i2c_ctrl_read | 113 | * Name: i2c_ctrl_read |
| 131 | * | 114 | * |
| 132 | * Action: read from HPC over I2C | 115 | * Action: read from HPC over I2C |
| @@ -780,7 +763,7 @@ void free_hpc_access(void) | |||
| 780 | *---------------------------------------------------------------------*/ | 763 | *---------------------------------------------------------------------*/ |
| 781 | void ibmphp_lock_operations(void) | 764 | void ibmphp_lock_operations(void) |
| 782 | { | 765 | { |
| 783 | down(&semOperations); | 766 | mutex_lock(&operations_mutex); |
| 784 | to_debug = 1; | 767 | to_debug = 1; |
| 785 | } | 768 | } |
| 786 | 769 | ||
| @@ -790,7 +773,7 @@ void ibmphp_lock_operations(void) | |||
| 790 | void ibmphp_unlock_operations(void) | 773 | void ibmphp_unlock_operations(void) |
| 791 | { | 774 | { |
| 792 | debug("%s - Entry\n", __func__); | 775 | debug("%s - Entry\n", __func__); |
| 793 | up(&semOperations); | 776 | mutex_unlock(&operations_mutex); |
| 794 | to_debug = 0; | 777 | to_debug = 0; |
| 795 | debug("%s - Exit\n", __func__); | 778 | debug("%s - Exit\n", __func__); |
| 796 | } | 779 | } |
| @@ -816,7 +799,7 @@ static int poll_hpc(void *data) | |||
| 816 | 799 | ||
| 817 | while (!kthread_should_stop()) { | 800 | while (!kthread_should_stop()) { |
| 818 | /* try to get the lock to do some kind of hardware access */ | 801 | /* try to get the lock to do some kind of hardware access */ |
| 819 | down(&semOperations); | 802 | mutex_lock(&operations_mutex); |
| 820 | 803 | ||
| 821 | switch (poll_state) { | 804 | switch (poll_state) { |
| 822 | case POLL_LATCH_REGISTER: | 805 | case POLL_LATCH_REGISTER: |
| @@ -871,13 +854,13 @@ static int poll_hpc(void *data) | |||
| 871 | break; | 854 | break; |
| 872 | case POLL_SLEEP: | 855 | case POLL_SLEEP: |
| 873 | /* don't sleep with a lock on the hardware */ | 856 | /* don't sleep with a lock on the hardware */ |
| 874 | up(&semOperations); | 857 | mutex_unlock(&operations_mutex); |
| 875 | msleep(POLL_INTERVAL_SEC * 1000); | 858 | msleep(POLL_INTERVAL_SEC * 1000); |
| 876 | 859 | ||
| 877 | if (kthread_should_stop()) | 860 | if (kthread_should_stop()) |
| 878 | goto out_sleep; | 861 | goto out_sleep; |
| 879 | 862 | ||
| 880 | down(&semOperations); | 863 | mutex_lock(&operations_mutex); |
| 881 | 864 | ||
| 882 | if (poll_count >= POLL_LATCH_CNT) { | 865 | if (poll_count >= POLL_LATCH_CNT) { |
| 883 | poll_count = 0; | 866 | poll_count = 0; |
| @@ -887,12 +870,12 @@ static int poll_hpc(void *data) | |||
| 887 | break; | 870 | break; |
| 888 | } | 871 | } |
| 889 | /* give up the hardware semaphore */ | 872 | /* give up the hardware semaphore */ |
| 890 | up(&semOperations); | 873 | mutex_unlock(&operations_mutex); |
| 891 | /* sleep for a short time just for good measure */ | 874 | /* sleep for a short time just for good measure */ |
| 892 | out_sleep: | 875 | out_sleep: |
| 893 | msleep(100); | 876 | msleep(100); |
| 894 | } | 877 | } |
| 895 | up(&sem_exit); | 878 | complete(&exit_complete); |
| 896 | debug("%s - Exit\n", __func__); | 879 | debug("%s - Exit\n", __func__); |
| 897 | return 0; | 880 | return 0; |
| 898 | } | 881 | } |
| @@ -1060,9 +1043,9 @@ void __exit ibmphp_hpc_stop_poll_thread(void) | |||
| 1060 | debug("after locking operations\n"); | 1043 | debug("after locking operations\n"); |
| 1061 | 1044 | ||
| 1062 | // wait for poll thread to exit | 1045 | // wait for poll thread to exit |
| 1063 | debug("before sem_exit down\n"); | 1046 | debug("before exit_complete down\n"); |
| 1064 | down(&sem_exit); | 1047 | wait_for_completion(&exit_complete); |
| 1065 | debug("after sem_exit down\n"); | 1048 | debug("after exit_completion down\n"); |
| 1066 | 1049 | ||
| 1067 | // cleanup | 1050 | // cleanup |
| 1068 | debug("before free_hpc_access\n"); | 1051 | debug("before free_hpc_access\n"); |
| @@ -1070,8 +1053,6 @@ void __exit ibmphp_hpc_stop_poll_thread(void) | |||
| 1070 | debug("after free_hpc_access\n"); | 1053 | debug("after free_hpc_access\n"); |
| 1071 | ibmphp_unlock_operations(); | 1054 | ibmphp_unlock_operations(); |
| 1072 | debug("after unlock operations\n"); | 1055 | debug("after unlock operations\n"); |
| 1073 | up(&sem_exit); | ||
| 1074 | debug("after sem exit up\n"); | ||
| 1075 | 1056 | ||
| 1076 | debug("%s - Exit\n", __func__); | 1057 | debug("%s - Exit\n", __func__); |
| 1077 | } | 1058 | } |
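Not part of the patch, just the shape of the conversion above: the exit handshake now uses a completion instead of a semaphore, so the stopping side blocks until the polling kthread has signalled that it is done. A minimal sketch with illustrative names:

    #include <linux/completion.h>
    #include <linux/delay.h>
    #include <linux/kthread.h>

    static DECLARE_COMPLETION(example_exit_done);

    static int example_poll_thread(void *data)
    {
            while (!kthread_should_stop())
                    msleep(100);                    /* polling work goes here */

            complete(&example_exit_done);           /* signal "I am finished" */
            return 0;
    }

    static void example_stop_poll_thread(struct task_struct *tsk)
    {
            kthread_stop(tsk);                      /* ask the thread to stop */
            wait_for_completion(&example_exit_done);
    }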
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 7dd443aea5a5..6a2365cd794e 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
| @@ -156,9 +156,9 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd, | |||
| 156 | slot_ctrl |= (cmd & mask); | 156 | slot_ctrl |= (cmd & mask); |
| 157 | ctrl->cmd_busy = 1; | 157 | ctrl->cmd_busy = 1; |
| 158 | smp_mb(); | 158 | smp_mb(); |
| 159 | ctrl->slot_ctrl = slot_ctrl; | ||
| 159 | pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl); | 160 | pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl); |
| 160 | ctrl->cmd_started = jiffies; | 161 | ctrl->cmd_started = jiffies; |
| 161 | ctrl->slot_ctrl = slot_ctrl; | ||
| 162 | 162 | ||
| 163 | /* | 163 | /* |
| 164 | * Controllers with the Intel CF118 and similar errata advertise | 164 | * Controllers with the Intel CF118 and similar errata advertise |
| @@ -736,12 +736,25 @@ void pcie_clear_hotplug_events(struct controller *ctrl) | |||
| 736 | 736 | ||
| 737 | void pcie_enable_interrupt(struct controller *ctrl) | 737 | void pcie_enable_interrupt(struct controller *ctrl) |
| 738 | { | 738 | { |
| 739 | pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_HPIE, PCI_EXP_SLTCTL_HPIE); | 739 | u16 mask; |
| 740 | |||
| 741 | mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE; | ||
| 742 | pcie_write_cmd(ctrl, mask, mask); | ||
| 740 | } | 743 | } |
| 741 | 744 | ||
| 742 | void pcie_disable_interrupt(struct controller *ctrl) | 745 | void pcie_disable_interrupt(struct controller *ctrl) |
| 743 | { | 746 | { |
| 744 | pcie_write_cmd(ctrl, 0, PCI_EXP_SLTCTL_HPIE); | 747 | u16 mask; |
| 748 | |||
| 749 | /* | ||
| 750 | * Mask hot-plug interrupt to prevent it triggering immediately | ||
| 751 | * when the link goes inactive (we still get PME when any of the | ||
| 752 | * enabled events is detected). Same goes with Link Layer State | ||
| 753 | * changed event which generates PME immediately when the link goes | ||
| 754 | * inactive so mask it as well. | ||
| 755 | */ | ||
| 756 | mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE; | ||
| 757 | pcie_write_cmd(ctrl, 0, mask); | ||
| 745 | } | 758 | } |
| 746 | 759 | ||
| 747 | /* | 760 | /* |
| @@ -920,3 +933,5 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400, | |||
| 920 | PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); | 933 | PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); |
| 921 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401, | 934 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0401, |
| 922 | PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); | 935 | PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); |
| 936 | DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_HXT, 0x0401, | ||
| 937 | PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); | ||
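Side note, not from the patch: in a driver without pciehp's command-completion tracking, masking the same two Slot Control bits could be expressed with the generic capability helper; a sketch with an illustrative function name (pciehp itself goes through pcie_write_cmd() so that Command Completed handling still applies):

    #include <linux/pci.h>

    /* Sketch: clear HPIE and DLLSCE in Slot Control in one read-modify-write. */
    static void example_mask_hotplug_events(struct pci_dev *pdev)
    {
            pcie_capability_clear_word(pdev, PCI_EXP_SLTCTL,
                                       PCI_EXP_SLTCTL_HPIE |
                                       PCI_EXP_SLTCTL_DLLSCE);
    }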
diff --git a/drivers/pci/of.c b/drivers/pci/of.c index 4c4217d0c3f1..3d32da15c215 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c | |||
| @@ -113,7 +113,7 @@ struct device_node *of_pci_find_child_device(struct device_node *parent, | |||
| 113 | * a fake root for all functions of a multi-function | 113 | * a fake root for all functions of a multi-function |
| 114 | * device we go down them as well. | 114 | * device we go down them as well. |
| 115 | */ | 115 | */ |
| 116 | if (!strcmp(node->name, "multifunc-device")) { | 116 | if (of_node_name_eq(node, "multifunc-device")) { |
| 117 | for_each_child_of_node(node, node2) { | 117 | for_each_child_of_node(node, node2) { |
| 118 | if (__of_pci_pci_compare(node2, devfn)) { | 118 | if (__of_pci_pci_compare(node2, devfn)) { |
| 119 | of_node_put(node); | 119 | of_node_put(node); |
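Not part of the patch: of_node_name_eq() compares the node-name portion of full_name (anything after '@' is ignored), tolerates a NULL node, and lets code avoid the node->name pointer. A tiny sketch:

    #include <linux/of.h>

    /* Sketch: matches "multifunc-device" and "multifunc-device@0" alike. */
    static bool example_is_multifunc(const struct device_node *np)
    {
            return of_node_name_eq(np, "multifunc-device");
    }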
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 79b1610a8beb..71853befd435 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
| @@ -100,7 +100,7 @@ static ssize_t new_id_store(struct device_driver *driver, const char *buf, | |||
| 100 | { | 100 | { |
| 101 | struct pci_driver *pdrv = to_pci_driver(driver); | 101 | struct pci_driver *pdrv = to_pci_driver(driver); |
| 102 | const struct pci_device_id *ids = pdrv->id_table; | 102 | const struct pci_device_id *ids = pdrv->id_table; |
| 103 | __u32 vendor, device, subvendor = PCI_ANY_ID, | 103 | u32 vendor, device, subvendor = PCI_ANY_ID, |
| 104 | subdevice = PCI_ANY_ID, class = 0, class_mask = 0; | 104 | subdevice = PCI_ANY_ID, class = 0, class_mask = 0; |
| 105 | unsigned long driver_data = 0; | 105 | unsigned long driver_data = 0; |
| 106 | int fields = 0; | 106 | int fields = 0; |
| @@ -168,7 +168,7 @@ static ssize_t remove_id_store(struct device_driver *driver, const char *buf, | |||
| 168 | { | 168 | { |
| 169 | struct pci_dynid *dynid, *n; | 169 | struct pci_dynid *dynid, *n; |
| 170 | struct pci_driver *pdrv = to_pci_driver(driver); | 170 | struct pci_driver *pdrv = to_pci_driver(driver); |
| 171 | __u32 vendor, device, subvendor = PCI_ANY_ID, | 171 | u32 vendor, device, subvendor = PCI_ANY_ID, |
| 172 | subdevice = PCI_ANY_ID, class = 0, class_mask = 0; | 172 | subdevice = PCI_ANY_ID, class = 0, class_mask = 0; |
| 173 | int fields = 0; | 173 | int fields = 0; |
| 174 | size_t retval = -ENODEV; | 174 | size_t retval = -ENODEV; |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index c9d8e3c837de..bdb442004537 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
| @@ -861,7 +861,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
| 861 | if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot | 861 | if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot |
| 862 | && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) | 862 | && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) |
| 863 | need_restore = true; | 863 | need_restore = true; |
| 864 | /* Fall-through: force to D0 */ | 864 | /* Fall-through - force to D0 */ |
| 865 | default: | 865 | default: |
| 866 | pmcsr = 0; | 866 | pmcsr = 0; |
| 867 | break; | 867 | break; |
| @@ -1233,7 +1233,6 @@ static void pci_restore_pcie_state(struct pci_dev *dev) | |||
| 1233 | pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]); | 1233 | pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]); |
| 1234 | } | 1234 | } |
| 1235 | 1235 | ||
| 1236 | |||
| 1237 | static int pci_save_pcix_state(struct pci_dev *dev) | 1236 | static int pci_save_pcix_state(struct pci_dev *dev) |
| 1238 | { | 1237 | { |
| 1239 | int pos; | 1238 | int pos; |
| @@ -1270,6 +1269,45 @@ static void pci_restore_pcix_state(struct pci_dev *dev) | |||
| 1270 | pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]); | 1269 | pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]); |
| 1271 | } | 1270 | } |
| 1272 | 1271 | ||
| 1272 | static void pci_save_ltr_state(struct pci_dev *dev) | ||
| 1273 | { | ||
| 1274 | int ltr; | ||
| 1275 | struct pci_cap_saved_state *save_state; | ||
| 1276 | u16 *cap; | ||
| 1277 | |||
| 1278 | if (!pci_is_pcie(dev)) | ||
| 1279 | return; | ||
| 1280 | |||
| 1281 | ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); | ||
| 1282 | if (!ltr) | ||
| 1283 | return; | ||
| 1284 | |||
| 1285 | save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR); | ||
| 1286 | if (!save_state) { | ||
| 1287 | pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n"); | ||
| 1288 | return; | ||
| 1289 | } | ||
| 1290 | |||
| 1291 | cap = (u16 *)&save_state->cap.data[0]; | ||
| 1292 | pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++); | ||
| 1293 | pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++); | ||
| 1294 | } | ||
| 1295 | |||
| 1296 | static void pci_restore_ltr_state(struct pci_dev *dev) | ||
| 1297 | { | ||
| 1298 | struct pci_cap_saved_state *save_state; | ||
| 1299 | int ltr; | ||
| 1300 | u16 *cap; | ||
| 1301 | |||
| 1302 | save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR); | ||
| 1303 | ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); | ||
| 1304 | if (!save_state || !ltr) | ||
| 1305 | return; | ||
| 1306 | |||
| 1307 | cap = (u16 *)&save_state->cap.data[0]; | ||
| 1308 | pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++); | ||
| 1309 | pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++); | ||
| 1310 | } | ||
| 1273 | 1311 | ||
| 1274 | /** | 1312 | /** |
| 1275 | * pci_save_state - save the PCI configuration space of a device before suspending | 1313 | * pci_save_state - save the PCI configuration space of a device before suspending |
| @@ -1291,6 +1329,7 @@ int pci_save_state(struct pci_dev *dev) | |||
| 1291 | if (i != 0) | 1329 | if (i != 0) |
| 1292 | return i; | 1330 | return i; |
| 1293 | 1331 | ||
| 1332 | pci_save_ltr_state(dev); | ||
| 1294 | pci_save_dpc_state(dev); | 1333 | pci_save_dpc_state(dev); |
| 1295 | return pci_save_vc_state(dev); | 1334 | return pci_save_vc_state(dev); |
| 1296 | } | 1335 | } |
| @@ -1390,7 +1429,12 @@ void pci_restore_state(struct pci_dev *dev) | |||
| 1390 | if (!dev->state_saved) | 1429 | if (!dev->state_saved) |
| 1391 | return; | 1430 | return; |
| 1392 | 1431 | ||
| 1393 | /* PCI Express register must be restored first */ | 1432 | /* |
| 1433 | * Restore max latencies (in the LTR capability) before enabling | ||
| 1434 | * LTR itself (in the PCIe capability). | ||
| 1435 | */ | ||
| 1436 | pci_restore_ltr_state(dev); | ||
| 1437 | |||
| 1394 | pci_restore_pcie_state(dev); | 1438 | pci_restore_pcie_state(dev); |
| 1395 | pci_restore_pasid_state(dev); | 1439 | pci_restore_pasid_state(dev); |
| 1396 | pci_restore_pri_state(dev); | 1440 | pci_restore_pri_state(dev); |
| @@ -2260,7 +2304,7 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup) | |||
| 2260 | case PCI_D2: | 2304 | case PCI_D2: |
| 2261 | if (pci_no_d1d2(dev)) | 2305 | if (pci_no_d1d2(dev)) |
| 2262 | break; | 2306 | break; |
| 2263 | /* else: fall through */ | 2307 | /* else, fall through */ |
| 2264 | default: | 2308 | default: |
| 2265 | target_state = state; | 2309 | target_state = state; |
| 2266 | } | 2310 | } |
| @@ -2501,6 +2545,25 @@ void pci_config_pm_runtime_put(struct pci_dev *pdev) | |||
| 2501 | pm_runtime_put_sync(parent); | 2545 | pm_runtime_put_sync(parent); |
| 2502 | } | 2546 | } |
| 2503 | 2547 | ||
| 2548 | static const struct dmi_system_id bridge_d3_blacklist[] = { | ||
| 2549 | #ifdef CONFIG_X86 | ||
| 2550 | { | ||
| 2551 | /* | ||
| 2552 | * Gigabyte X299 root port is not marked as hotplug capable | ||
| 2553 | * which allows Linux to power manage it. However, this | ||
| 2554 | * confuses the BIOS SMI handler so don't power manage root | ||
| 2555 | * ports on that system. | ||
| 2556 | */ | ||
| 2557 | .ident = "X299 DESIGNARE EX-CF", | ||
| 2558 | .matches = { | ||
| 2559 | DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), | ||
| 2560 | DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"), | ||
| 2561 | }, | ||
| 2562 | }, | ||
| 2563 | #endif | ||
| 2564 | { } | ||
| 2565 | }; | ||
| 2566 | |||
| 2504 | /** | 2567 | /** |
| 2505 | * pci_bridge_d3_possible - Is it possible to put the bridge into D3 | 2568 | * pci_bridge_d3_possible - Is it possible to put the bridge into D3 |
| 2506 | * @bridge: Bridge to check | 2569 | * @bridge: Bridge to check |
| @@ -2546,6 +2609,9 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge) | |||
| 2546 | if (bridge->is_hotplug_bridge) | 2609 | if (bridge->is_hotplug_bridge) |
| 2547 | return false; | 2610 | return false; |
| 2548 | 2611 | ||
| 2612 | if (dmi_check_system(bridge_d3_blacklist)) | ||
| 2613 | return false; | ||
| 2614 | |||
| 2549 | /* | 2615 | /* |
| 2550 | * It should be safe to put PCIe ports from 2015 or newer | 2616 | * It should be safe to put PCIe ports from 2015 or newer |
| 2551 | * to D3. | 2617 | * to D3. |
| @@ -2998,6 +3064,11 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev) | |||
| 2998 | if (error) | 3064 | if (error) |
| 2999 | pci_err(dev, "unable to preallocate PCI-X save buffer\n"); | 3065 | pci_err(dev, "unable to preallocate PCI-X save buffer\n"); |
| 3000 | 3066 | ||
| 3067 | error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR, | ||
| 3068 | 2 * sizeof(u16)); | ||
| 3069 | if (error) | ||
| 3070 | pci_err(dev, "unable to allocate suspend buffer for LTR\n"); | ||
| 3071 | |||
| 3001 | pci_allocate_vc_save_buffers(dev); | 3072 | pci_allocate_vc_save_buffers(dev); |
| 3002 | } | 3073 | } |
| 3003 | 3074 | ||
| @@ -5058,39 +5129,42 @@ unlock: | |||
| 5058 | return 0; | 5129 | return 0; |
| 5059 | } | 5130 | } |
| 5060 | 5131 | ||
| 5061 | /* Save and disable devices from the top of the tree down */ | 5132 | /* |
| 5062 | static void pci_bus_save_and_disable(struct pci_bus *bus) | 5133 | * Save and disable devices from the top of the tree down while holding |
| 5134 | * the @dev mutex lock for the entire tree. | ||
| 5135 | */ | ||
| 5136 | static void pci_bus_save_and_disable_locked(struct pci_bus *bus) | ||
| 5063 | { | 5137 | { |
| 5064 | struct pci_dev *dev; | 5138 | struct pci_dev *dev; |
| 5065 | 5139 | ||
| 5066 | list_for_each_entry(dev, &bus->devices, bus_list) { | 5140 | list_for_each_entry(dev, &bus->devices, bus_list) { |
| 5067 | pci_dev_lock(dev); | ||
| 5068 | pci_dev_save_and_disable(dev); | 5141 | pci_dev_save_and_disable(dev); |
| 5069 | pci_dev_unlock(dev); | ||
| 5070 | if (dev->subordinate) | 5142 | if (dev->subordinate) |
| 5071 | pci_bus_save_and_disable(dev->subordinate); | 5143 | pci_bus_save_and_disable_locked(dev->subordinate); |
| 5072 | } | 5144 | } |
| 5073 | } | 5145 | } |
| 5074 | 5146 | ||
| 5075 | /* | 5147 | /* |
| 5076 | * Restore devices from top of the tree down - parent bridges need to be | 5148 | * Restore devices from top of the tree down while holding @dev mutex lock |
| 5077 | * restored before we can get to subordinate devices. | 5149 | * for the entire tree. Parent bridges need to be restored before we can |
| 5150 | * get to subordinate devices. | ||
| 5078 | */ | 5151 | */ |
| 5079 | static void pci_bus_restore(struct pci_bus *bus) | 5152 | static void pci_bus_restore_locked(struct pci_bus *bus) |
| 5080 | { | 5153 | { |
| 5081 | struct pci_dev *dev; | 5154 | struct pci_dev *dev; |
| 5082 | 5155 | ||
| 5083 | list_for_each_entry(dev, &bus->devices, bus_list) { | 5156 | list_for_each_entry(dev, &bus->devices, bus_list) { |
| 5084 | pci_dev_lock(dev); | ||
| 5085 | pci_dev_restore(dev); | 5157 | pci_dev_restore(dev); |
| 5086 | pci_dev_unlock(dev); | ||
| 5087 | if (dev->subordinate) | 5158 | if (dev->subordinate) |
| 5088 | pci_bus_restore(dev->subordinate); | 5159 | pci_bus_restore_locked(dev->subordinate); |
| 5089 | } | 5160 | } |
| 5090 | } | 5161 | } |
| 5091 | 5162 | ||
| 5092 | /* Save and disable devices from the top of the tree down */ | 5163 | /* |
| 5093 | static void pci_slot_save_and_disable(struct pci_slot *slot) | 5164 | * Save and disable devices from the top of the tree down while holding |
| 5165 | * the @dev mutex lock for the entire tree. | ||
| 5166 | */ | ||
| 5167 | static void pci_slot_save_and_disable_locked(struct pci_slot *slot) | ||
| 5094 | { | 5168 | { |
| 5095 | struct pci_dev *dev; | 5169 | struct pci_dev *dev; |
| 5096 | 5170 | ||
| @@ -5099,26 +5173,25 @@ static void pci_slot_save_and_disable(struct pci_slot *slot) | |||
| 5099 | continue; | 5173 | continue; |
| 5100 | pci_dev_save_and_disable(dev); | 5174 | pci_dev_save_and_disable(dev); |
| 5101 | if (dev->subordinate) | 5175 | if (dev->subordinate) |
| 5102 | pci_bus_save_and_disable(dev->subordinate); | 5176 | pci_bus_save_and_disable_locked(dev->subordinate); |
| 5103 | } | 5177 | } |
| 5104 | } | 5178 | } |
| 5105 | 5179 | ||
| 5106 | /* | 5180 | /* |
| 5107 | * Restore devices from top of the tree down - parent bridges need to be | 5181 | * Restore devices from top of the tree down while holding @dev mutex lock |
| 5108 | * restored before we can get to subordinate devices. | 5182 | * for the entire tree. Parent bridges need to be restored before we can |
| 5183 | * get to subordinate devices. | ||
| 5109 | */ | 5184 | */ |
| 5110 | static void pci_slot_restore(struct pci_slot *slot) | 5185 | static void pci_slot_restore_locked(struct pci_slot *slot) |
| 5111 | { | 5186 | { |
| 5112 | struct pci_dev *dev; | 5187 | struct pci_dev *dev; |
| 5113 | 5188 | ||
| 5114 | list_for_each_entry(dev, &slot->bus->devices, bus_list) { | 5189 | list_for_each_entry(dev, &slot->bus->devices, bus_list) { |
| 5115 | if (!dev->slot || dev->slot != slot) | 5190 | if (!dev->slot || dev->slot != slot) |
| 5116 | continue; | 5191 | continue; |
| 5117 | pci_dev_lock(dev); | ||
| 5118 | pci_dev_restore(dev); | 5192 | pci_dev_restore(dev); |
| 5119 | pci_dev_unlock(dev); | ||
| 5120 | if (dev->subordinate) | 5193 | if (dev->subordinate) |
| 5121 | pci_bus_restore(dev->subordinate); | 5194 | pci_bus_restore_locked(dev->subordinate); |
| 5122 | } | 5195 | } |
| 5123 | } | 5196 | } |
| 5124 | 5197 | ||
| @@ -5177,17 +5250,15 @@ static int __pci_reset_slot(struct pci_slot *slot) | |||
| 5177 | if (rc) | 5250 | if (rc) |
| 5178 | return rc; | 5251 | return rc; |
| 5179 | 5252 | ||
| 5180 | pci_slot_save_and_disable(slot); | ||
| 5181 | |||
| 5182 | if (pci_slot_trylock(slot)) { | 5253 | if (pci_slot_trylock(slot)) { |
| 5254 | pci_slot_save_and_disable_locked(slot); | ||
| 5183 | might_sleep(); | 5255 | might_sleep(); |
| 5184 | rc = pci_reset_hotplug_slot(slot->hotplug, 0); | 5256 | rc = pci_reset_hotplug_slot(slot->hotplug, 0); |
| 5257 | pci_slot_restore_locked(slot); | ||
| 5185 | pci_slot_unlock(slot); | 5258 | pci_slot_unlock(slot); |
| 5186 | } else | 5259 | } else |
| 5187 | rc = -EAGAIN; | 5260 | rc = -EAGAIN; |
| 5188 | 5261 | ||
| 5189 | pci_slot_restore(slot); | ||
| 5190 | |||
| 5191 | return rc; | 5262 | return rc; |
| 5192 | } | 5263 | } |
| 5193 | 5264 | ||
| @@ -5273,17 +5344,15 @@ static int __pci_reset_bus(struct pci_bus *bus) | |||
| 5273 | if (rc) | 5344 | if (rc) |
| 5274 | return rc; | 5345 | return rc; |
| 5275 | 5346 | ||
| 5276 | pci_bus_save_and_disable(bus); | ||
| 5277 | |||
| 5278 | if (pci_bus_trylock(bus)) { | 5347 | if (pci_bus_trylock(bus)) { |
| 5348 | pci_bus_save_and_disable_locked(bus); | ||
| 5279 | might_sleep(); | 5349 | might_sleep(); |
| 5280 | rc = pci_bridge_secondary_bus_reset(bus->self); | 5350 | rc = pci_bridge_secondary_bus_reset(bus->self); |
| 5351 | pci_bus_restore_locked(bus); | ||
| 5281 | pci_bus_unlock(bus); | 5352 | pci_bus_unlock(bus); |
| 5282 | } else | 5353 | } else |
| 5283 | rc = -EAGAIN; | 5354 | rc = -EAGAIN; |
| 5284 | 5355 | ||
| 5285 | pci_bus_restore(bus); | ||
| 5286 | |||
| 5287 | return rc; | 5356 | return rc; |
| 5288 | } | 5357 | } |
| 5289 | 5358 | ||
| @@ -6000,8 +6069,7 @@ void pci_reassigndev_resource_alignment(struct pci_dev *dev) | |||
| 6000 | * to enable the kernel to reassign new resource | 6069 | * to enable the kernel to reassign new resource |
| 6001 | * window later on. | 6070 | * window later on. |
| 6002 | */ | 6071 | */ |
| 6003 | if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && | 6072 | if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { |
| 6004 | (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { | ||
| 6005 | for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { | 6073 | for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { |
| 6006 | r = &dev->resource[i]; | 6074 | r = &dev->resource[i]; |
| 6007 | if (!(r->flags & IORESOURCE_MEM)) | 6075 | if (!(r->flags & IORESOURCE_MEM)) |
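For orientation, not from the patch: the two 16-bit registers that pci_save_ltr_state()/pci_restore_ltr_state() above preserve live in the LTR extended capability. A sketch of reading them with an illustrative helper:

    #include <linux/pci.h>

    /* Sketch: fetch the LTR max snoop / no-snoop latency registers. */
    static int example_read_ltr(struct pci_dev *dev, u16 *snoop, u16 *nosnoop)
    {
            int ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);

            if (!ltr)
                    return -ENODEV;         /* no LTR capability */

            pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, snoop);
            pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, nosnoop);
            return 0;
    }

As the new comment in pci_restore_state() notes, these latencies must be back in place before LTR is re-enabled via the PCIe capability.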
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 44742b2e1126..5cbdbca904ac 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig | |||
| @@ -6,10 +6,9 @@ config PCIEPORTBUS | |||
| 6 | bool "PCI Express Port Bus support" | 6 | bool "PCI Express Port Bus support" |
| 7 | depends on PCI | 7 | depends on PCI |
| 8 | help | 8 | help |
| 9 | This automatically enables PCI Express Port Bus support. Users can | 9 | This enables PCI Express Port Bus support. Users can then enable |
| 10 | choose Native Hot-Plug support, Advanced Error Reporting support, | 10 | support for Native Hot-Plug, Advanced Error Reporting, Power |
| 11 | Power Management Event support and Virtual Channel support to run | 11 | Management Events, and Downstream Port Containment. |
| 12 | on PCI Express Ports (Root or Switch). | ||
| 13 | 12 | ||
| 14 | # | 13 | # |
| 15 | # Include service Kconfig here | 14 | # Include service Kconfig here |
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile index ab514083d5d4..f1d7bc1e5efa 100644 --- a/drivers/pci/pcie/Makefile +++ b/drivers/pci/pcie/Makefile | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | # Makefile for PCI Express features and port driver | 3 | # Makefile for PCI Express features and port driver |
| 4 | 4 | ||
| 5 | pcieportdrv-y := portdrv_core.o portdrv_pci.o err.o | 5 | pcieportdrv-y := portdrv_core.o portdrv_pci.o err.o |
| 6 | pcieportdrv-y += bw_notification.o | ||
| 6 | 7 | ||
| 7 | obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o | 8 | obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o |
| 8 | 9 | ||
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index fed29de783e0..f8fc2114ad39 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c | |||
| @@ -117,7 +117,7 @@ bool pci_aer_available(void) | |||
| 117 | 117 | ||
| 118 | static int ecrc_policy = ECRC_POLICY_DEFAULT; | 118 | static int ecrc_policy = ECRC_POLICY_DEFAULT; |
| 119 | 119 | ||
| 120 | static const char *ecrc_policy_str[] = { | 120 | static const char * const ecrc_policy_str[] = { |
| 121 | [ECRC_POLICY_DEFAULT] = "bios", | 121 | [ECRC_POLICY_DEFAULT] = "bios", |
| 122 | [ECRC_POLICY_OFF] = "off", | 122 | [ECRC_POLICY_OFF] = "off", |
| 123 | [ECRC_POLICY_ON] = "on" | 123 | [ECRC_POLICY_ON] = "on" |
| @@ -203,11 +203,8 @@ void pcie_ecrc_get_policy(char *str) | |||
| 203 | { | 203 | { |
| 204 | int i; | 204 | int i; |
| 205 | 205 | ||
| 206 | for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++) | 206 | i = match_string(ecrc_policy_str, ARRAY_SIZE(ecrc_policy_str), str); |
| 207 | if (!strncmp(str, ecrc_policy_str[i], | 207 | if (i < 0) |
| 208 | strlen(ecrc_policy_str[i]))) | ||
| 209 | break; | ||
| 210 | if (i >= ARRAY_SIZE(ecrc_policy_str)) | ||
| 211 | return; | 208 | return; |
| 212 | 209 | ||
| 213 | ecrc_policy = i; | 210 | ecrc_policy = i; |
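Not from the patch: match_string() returns the index of an exact match or a negative errno, which is what lets the ecrc_policy loop above collapse to two lines; note it requires an exact string, whereas the old strncmp() accepted any input that merely started with a policy name. A small usage sketch with illustrative strings:

    #include <linux/kernel.h>
    #include <linux/string.h>

    static const char * const example_modes[] = { "bios", "off", "on" };

    /* Sketch: returns 0, 1 or 2 on an exact match, -EINVAL otherwise. */
    static int example_parse_mode(const char *str)
    {
            return match_string(example_modes, ARRAY_SIZE(example_modes), str);
    }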
diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c new file mode 100644 index 000000000000..d2eae3b7cc0f --- /dev/null +++ b/drivers/pci/pcie/bw_notification.c | |||
| @@ -0,0 +1,110 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 2 | /* | ||
| 3 | * PCI Express Link Bandwidth Notification services driver | ||
| 4 | * Author: Alexandru Gagniuc <mr.nuke.me@gmail.com> | ||
| 5 | * | ||
| 6 | * Copyright (C) 2019, Dell Inc | ||
| 7 | * | ||
| 8 | * The PCIe Link Bandwidth Notification provides a way to notify the | ||
| 9 | * operating system when the link width or data rate changes. This | ||
| 10 | * capability is required for all root ports and downstream ports | ||
| 11 | * supporting links wider than x1 and/or multiple link speeds. | ||
| 12 | * | ||
| 13 | * This service port driver hooks into the bandwidth notification interrupt | ||
| 14 | * and warns when links become degraded in operation. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include "../pci.h" | ||
| 18 | #include "portdrv.h" | ||
| 19 | |||
| 20 | static bool pcie_link_bandwidth_notification_supported(struct pci_dev *dev) | ||
| 21 | { | ||
| 22 | int ret; | ||
| 23 | u32 lnk_cap; | ||
| 24 | |||
| 25 | ret = pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnk_cap); | ||
| 26 | return (ret == PCIBIOS_SUCCESSFUL) && (lnk_cap & PCI_EXP_LNKCAP_LBNC); | ||
| 27 | } | ||
| 28 | |||
| 29 | static void pcie_enable_link_bandwidth_notification(struct pci_dev *dev) | ||
| 30 | { | ||
| 31 | u16 lnk_ctl; | ||
| 32 | |||
| 33 | pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl); | ||
| 34 | lnk_ctl |= PCI_EXP_LNKCTL_LBMIE; | ||
| 35 | pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl); | ||
| 36 | } | ||
| 37 | |||
| 38 | static void pcie_disable_link_bandwidth_notification(struct pci_dev *dev) | ||
| 39 | { | ||
| 40 | u16 lnk_ctl; | ||
| 41 | |||
| 42 | pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl); | ||
| 43 | lnk_ctl &= ~PCI_EXP_LNKCTL_LBMIE; | ||
| 44 | pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl); | ||
| 45 | } | ||
| 46 | |||
| 47 | static irqreturn_t pcie_bw_notification_handler(int irq, void *context) | ||
| 48 | { | ||
| 49 | struct pcie_device *srv = context; | ||
| 50 | struct pci_dev *port = srv->port; | ||
| 51 | struct pci_dev *dev; | ||
| 52 | u16 link_status, events; | ||
| 53 | int ret; | ||
| 54 | |||
| 55 | ret = pcie_capability_read_word(port, PCI_EXP_LNKSTA, &link_status); | ||
| 56 | events = link_status & PCI_EXP_LNKSTA_LBMS; | ||
| 57 | |||
| 58 | if (ret != PCIBIOS_SUCCESSFUL || !events) | ||
| 59 | return IRQ_NONE; | ||
| 60 | |||
| 61 | /* | ||
| 62 | * Print status from downstream devices, not this root port or | ||
| 63 | * downstream switch port. | ||
| 64 | */ | ||
| 65 | down_read(&pci_bus_sem); | ||
| 66 | list_for_each_entry(dev, &port->subordinate->devices, bus_list) | ||
| 67 | __pcie_print_link_status(dev, false); | ||
| 68 | up_read(&pci_bus_sem); | ||
| 69 | |||
| 70 | pcie_update_link_speed(port->subordinate, link_status); | ||
| 71 | pcie_capability_write_word(port, PCI_EXP_LNKSTA, events); | ||
| 72 | return IRQ_HANDLED; | ||
| 73 | } | ||
| 74 | |||
| 75 | static int pcie_bandwidth_notification_probe(struct pcie_device *srv) | ||
| 76 | { | ||
| 77 | int ret; | ||
| 78 | |||
| 79 | /* Single-width or single-speed ports do not have to support this. */ | ||
| 80 | if (!pcie_link_bandwidth_notification_supported(srv->port)) | ||
| 81 | return -ENODEV; | ||
| 82 | |||
| 83 | ret = request_threaded_irq(srv->irq, NULL, pcie_bw_notification_handler, | ||
| 84 | IRQF_SHARED, "PCIe BW notif", srv); | ||
| 85 | if (ret) | ||
| 86 | return ret; | ||
| 87 | |||
| 88 | pcie_enable_link_bandwidth_notification(srv->port); | ||
| 89 | |||
| 90 | return 0; | ||
| 91 | } | ||
| 92 | |||
| 93 | static void pcie_bandwidth_notification_remove(struct pcie_device *srv) | ||
| 94 | { | ||
| 95 | pcie_disable_link_bandwidth_notification(srv->port); | ||
| 96 | free_irq(srv->irq, srv); | ||
| 97 | } | ||
| 98 | |||
| 99 | static struct pcie_port_service_driver pcie_bandwidth_notification_driver = { | ||
| 100 | .name = "pcie_bw_notification", | ||
| 101 | .port_type = PCIE_ANY_PORT, | ||
| 102 | .service = PCIE_PORT_SERVICE_BWNOTIF, | ||
| 103 | .probe = pcie_bandwidth_notification_probe, | ||
| 104 | .remove = pcie_bandwidth_notification_remove, | ||
| 105 | }; | ||
| 106 | |||
| 107 | int __init pcie_bandwidth_notification_init(void) | ||
| 108 | { | ||
| 109 | return pcie_port_service_register(&pcie_bandwidth_notification_driver); | ||
| 110 | } | ||
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c index e435d12e61a0..7b77754a82de 100644 --- a/drivers/pci/pcie/dpc.c +++ b/drivers/pci/pcie/dpc.c | |||
| @@ -202,6 +202,28 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc) | |||
| 202 | pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status); | 202 | pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status); |
| 203 | } | 203 | } |
| 204 | 204 | ||
| 205 | static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev, | ||
| 206 | struct aer_err_info *info) | ||
| 207 | { | ||
| 208 | int pos = dev->aer_cap; | ||
| 209 | u32 status, mask, sev; | ||
| 210 | |||
| 211 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); | ||
| 212 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask); | ||
| 213 | status &= ~mask; | ||
| 214 | if (!status) | ||
| 215 | return 0; | ||
| 216 | |||
| 217 | pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev); | ||
| 218 | status &= sev; | ||
| 219 | if (status) | ||
| 220 | info->severity = AER_FATAL; | ||
| 221 | else | ||
| 222 | info->severity = AER_NONFATAL; | ||
| 223 | |||
| 224 | return 1; | ||
| 225 | } | ||
| 226 | |||
| 205 | static irqreturn_t dpc_handler(int irq, void *context) | 227 | static irqreturn_t dpc_handler(int irq, void *context) |
| 206 | { | 228 | { |
| 207 | struct aer_err_info info; | 229 | struct aer_err_info info; |
| @@ -229,9 +251,12 @@ static irqreturn_t dpc_handler(int irq, void *context) | |||
| 229 | /* show RP PIO error detail information */ | 251 | /* show RP PIO error detail information */ |
| 230 | if (dpc->rp_extensions && reason == 3 && ext_reason == 0) | 252 | if (dpc->rp_extensions && reason == 3 && ext_reason == 0) |
| 231 | dpc_process_rp_pio_error(dpc); | 253 | dpc_process_rp_pio_error(dpc); |
| 232 | else if (reason == 0 && aer_get_device_error_info(pdev, &info)) { | 254 | else if (reason == 0 && |
| 255 | dpc_get_aer_uncorrect_severity(pdev, &info) && | ||
| 256 | aer_get_device_error_info(pdev, &info)) { | ||
| 233 | aer_print_error(pdev, &info); | 257 | aer_print_error(pdev, &info); |
| 234 | pci_cleanup_aer_uncorrect_error_status(pdev); | 258 | pci_cleanup_aer_uncorrect_error_status(pdev); |
| 259 | pci_aer_clear_fatal_status(pdev); | ||
| 235 | } | 260 | } |
| 236 | 261 | ||
| 237 | /* We configure DPC so it only triggers on ERR_FATAL */ | 262 | /* We configure DPC so it only triggers on ERR_FATAL */ |
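[Editor's note — stand-alone sketch of the classification added above.] dpc_get_aer_uncorrect_severity() drops masked uncorrectable bits, then treats the event as fatal if any remaining bit is also set in the AER Uncorrectable Error Severity register, otherwise as non-fatal. The toy classify() below mirrors that flow; the register values and the local AER_* enum are invented for the example (bit 5 is used as it is the Surprise Down error bit in the uncorrectable status register).

#include <stdint.h>
#include <stdio.h>

enum { AER_NONE, AER_NONFATAL, AER_FATAL };

/* same status/mask/severity flow as the new DPC helper */
static int classify(uint32_t status, uint32_t mask, uint32_t sever)
{
	status &= ~mask;		/* ignore masked error bits */
	if (!status)
		return AER_NONE;	/* nothing left to report */
	return (status & sever) ? AER_FATAL : AER_NONFATAL;
}

int main(void)
{
	/* surprise-down (bit 5) configured as fatal in UNCOR_SEVER */
	printf("%d\n", classify(1u << 5, 0, 1u << 5));	/* 2: fatal */
	/* same error bit, but configured as non-fatal */
	printf("%d\n", classify(1u << 5, 0, 0));	/* 1: non-fatal */
	return 0;
}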
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index 0dbcf429089f..54d593d10396 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c | |||
| @@ -363,6 +363,16 @@ static bool pcie_pme_check_wakeup(struct pci_bus *bus) | |||
| 363 | return false; | 363 | return false; |
| 364 | } | 364 | } |
| 365 | 365 | ||
| 366 | static void pcie_pme_disable_interrupt(struct pci_dev *port, | ||
| 367 | struct pcie_pme_service_data *data) | ||
| 368 | { | ||
| 369 | spin_lock_irq(&data->lock); | ||
| 370 | pcie_pme_interrupt_enable(port, false); | ||
| 371 | pcie_clear_root_pme_status(port); | ||
| 372 | data->noirq = true; | ||
| 373 | spin_unlock_irq(&data->lock); | ||
| 374 | } | ||
| 375 | |||
| 366 | /** | 376 | /** |
| 367 | * pcie_pme_suspend - Suspend PCIe PME service device. | 377 | * pcie_pme_suspend - Suspend PCIe PME service device. |
| 368 | * @srv: PCIe service device to suspend. | 378 | * @srv: PCIe service device to suspend. |
| @@ -387,11 +397,7 @@ static int pcie_pme_suspend(struct pcie_device *srv) | |||
| 387 | return 0; | 397 | return 0; |
| 388 | } | 398 | } |
| 389 | 399 | ||
| 390 | spin_lock_irq(&data->lock); | 400 | pcie_pme_disable_interrupt(port, data); |
| 391 | pcie_pme_interrupt_enable(port, false); | ||
| 392 | pcie_clear_root_pme_status(port); | ||
| 393 | data->noirq = true; | ||
| 394 | spin_unlock_irq(&data->lock); | ||
| 395 | 401 | ||
| 396 | synchronize_irq(srv->irq); | 402 | synchronize_irq(srv->irq); |
| 397 | 403 | ||
| @@ -427,34 +433,12 @@ static int pcie_pme_resume(struct pcie_device *srv) | |||
| 427 | */ | 433 | */ |
| 428 | static void pcie_pme_remove(struct pcie_device *srv) | 434 | static void pcie_pme_remove(struct pcie_device *srv) |
| 429 | { | 435 | { |
| 430 | pcie_pme_suspend(srv); | ||
| 431 | free_irq(srv->irq, srv); | ||
| 432 | kfree(get_service_data(srv)); | ||
| 433 | } | ||
| 434 | |||
| 435 | static int pcie_pme_runtime_suspend(struct pcie_device *srv) | ||
| 436 | { | ||
| 437 | struct pcie_pme_service_data *data = get_service_data(srv); | ||
| 438 | |||
| 439 | spin_lock_irq(&data->lock); | ||
| 440 | pcie_pme_interrupt_enable(srv->port, false); | ||
| 441 | pcie_clear_root_pme_status(srv->port); | ||
| 442 | data->noirq = true; | ||
| 443 | spin_unlock_irq(&data->lock); | ||
| 444 | |||
| 445 | return 0; | ||
| 446 | } | ||
| 447 | |||
| 448 | static int pcie_pme_runtime_resume(struct pcie_device *srv) | ||
| 449 | { | ||
| 450 | struct pcie_pme_service_data *data = get_service_data(srv); | 436 | struct pcie_pme_service_data *data = get_service_data(srv); |
| 451 | 437 | ||
| 452 | spin_lock_irq(&data->lock); | 438 | pcie_pme_disable_interrupt(srv->port, data); |
| 453 | pcie_pme_interrupt_enable(srv->port, true); | 439 | free_irq(srv->irq, srv); |
| 454 | data->noirq = false; | 440 | cancel_work_sync(&data->work); |
| 455 | spin_unlock_irq(&data->lock); | 441 | kfree(data); |
| 456 | |||
| 457 | return 0; | ||
| 458 | } | 442 | } |
| 459 | 443 | ||
| 460 | static struct pcie_port_service_driver pcie_pme_driver = { | 444 | static struct pcie_port_service_driver pcie_pme_driver = { |
| @@ -464,8 +448,6 @@ static struct pcie_port_service_driver pcie_pme_driver = { | |||
| 464 | 448 | ||
| 465 | .probe = pcie_pme_probe, | 449 | .probe = pcie_pme_probe, |
| 466 | .suspend = pcie_pme_suspend, | 450 | .suspend = pcie_pme_suspend, |
| 467 | .runtime_suspend = pcie_pme_runtime_suspend, | ||
| 468 | .runtime_resume = pcie_pme_runtime_resume, | ||
| 469 | .resume = pcie_pme_resume, | 451 | .resume = pcie_pme_resume, |
| 470 | .remove = pcie_pme_remove, | 452 | .remove = pcie_pme_remove, |
| 471 | }; | 453 | }; |
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index fbbf00b0992e..1d50dc58ac40 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h | |||
| @@ -20,8 +20,10 @@ | |||
| 20 | #define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT) | 20 | #define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT) |
| 21 | #define PCIE_PORT_SERVICE_DPC_SHIFT 3 /* Downstream Port Containment */ | 21 | #define PCIE_PORT_SERVICE_DPC_SHIFT 3 /* Downstream Port Containment */ |
| 22 | #define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT) | 22 | #define PCIE_PORT_SERVICE_DPC (1 << PCIE_PORT_SERVICE_DPC_SHIFT) |
| 23 | #define PCIE_PORT_SERVICE_BWNOTIF_SHIFT 4 /* Bandwidth notification */ | ||
| 24 | #define PCIE_PORT_SERVICE_BWNOTIF (1 << PCIE_PORT_SERVICE_BWNOTIF_SHIFT) | ||
| 23 | 25 | ||
| 24 | #define PCIE_PORT_DEVICE_MAXSERVICES 4 | 26 | #define PCIE_PORT_DEVICE_MAXSERVICES 5 |
| 25 | 27 | ||
| 26 | #ifdef CONFIG_PCIEAER | 28 | #ifdef CONFIG_PCIEAER |
| 27 | int pcie_aer_init(void); | 29 | int pcie_aer_init(void); |
| @@ -47,6 +49,8 @@ int pcie_dpc_init(void); | |||
| 47 | static inline int pcie_dpc_init(void) { return 0; } | 49 | static inline int pcie_dpc_init(void) { return 0; } |
| 48 | #endif | 50 | #endif |
| 49 | 51 | ||
| 52 | int pcie_bandwidth_notification_init(void); | ||
| 53 | |||
| 50 | /* Port Type */ | 54 | /* Port Type */ |
| 51 | #define PCIE_ANY_PORT (~0) | 55 | #define PCIE_ANY_PORT (~0) |
| 52 | 56 | ||
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index f458ac9cb70c..7d04f9d087a6 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c | |||
| @@ -99,7 +99,7 @@ static int pcie_message_numbers(struct pci_dev *dev, int mask, | |||
| 99 | */ | 99 | */ |
| 100 | static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask) | 100 | static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask) |
| 101 | { | 101 | { |
| 102 | int nr_entries, nvec; | 102 | int nr_entries, nvec, pcie_irq; |
| 103 | u32 pme = 0, aer = 0, dpc = 0; | 103 | u32 pme = 0, aer = 0, dpc = 0; |
| 104 | 104 | ||
| 105 | /* Allocate the maximum possible number of MSI/MSI-X vectors */ | 105 | /* Allocate the maximum possible number of MSI/MSI-X vectors */ |
| @@ -135,10 +135,13 @@ static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask) | |||
| 135 | return nr_entries; | 135 | return nr_entries; |
| 136 | } | 136 | } |
| 137 | 137 | ||
| 138 | /* PME and hotplug share an MSI/MSI-X vector */ | 138 | /* PME, hotplug and bandwidth notification share an MSI/MSI-X vector */ |
| 139 | if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) { | 139 | if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP | |
| 140 | irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pci_irq_vector(dev, pme); | 140 | PCIE_PORT_SERVICE_BWNOTIF)) { |
| 141 | irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pci_irq_vector(dev, pme); | 141 | pcie_irq = pci_irq_vector(dev, pme); |
| 142 | irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pcie_irq; | ||
| 143 | irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pcie_irq; | ||
| 144 | irqs[PCIE_PORT_SERVICE_BWNOTIF_SHIFT] = pcie_irq; | ||
| 142 | } | 145 | } |
| 143 | 146 | ||
| 144 | if (mask & PCIE_PORT_SERVICE_AER) | 147 | if (mask & PCIE_PORT_SERVICE_AER) |
| @@ -250,6 +253,10 @@ static int get_port_device_capability(struct pci_dev *dev) | |||
| 250 | pci_aer_available() && services & PCIE_PORT_SERVICE_AER) | 253 | pci_aer_available() && services & PCIE_PORT_SERVICE_AER) |
| 251 | services |= PCIE_PORT_SERVICE_DPC; | 254 | services |= PCIE_PORT_SERVICE_DPC; |
| 252 | 255 | ||
| 256 | if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM || | ||
| 257 | pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) | ||
| 258 | services |= PCIE_PORT_SERVICE_BWNOTIF; | ||
| 259 | |||
| 253 | return services; | 260 | return services; |
| 254 | } | 261 | } |
| 255 | 262 | ||
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 0acca3596807..0a87091a0800 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c | |||
| @@ -182,10 +182,12 @@ static void pcie_portdrv_err_resume(struct pci_dev *dev) | |||
| 182 | /* | 182 | /* |
| 183 | * LINUX Device Driver Model | 183 | * LINUX Device Driver Model |
| 184 | */ | 184 | */ |
| 185 | static const struct pci_device_id port_pci_ids[] = { { | 185 | static const struct pci_device_id port_pci_ids[] = { |
| 186 | /* handle any PCI-Express port */ | 186 | /* handle any PCI-Express port */ |
| 187 | PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0), | 187 | { PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0) }, |
| 188 | }, { /* end: all zeroes */ } | 188 | /* subtractive decode PCI-to-PCI bridge, class type is 060401h */ |
| 189 | { PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x01), ~0) }, | ||
| 190 | { }, | ||
| 189 | }; | 191 | }; |
| 190 | 192 | ||
| 191 | static const struct pci_error_handlers pcie_portdrv_err_handler = { | 193 | static const struct pci_error_handlers pcie_portdrv_err_handler = { |
| @@ -238,6 +240,7 @@ static void __init pcie_init_services(void) | |||
| 238 | pcie_pme_init(); | 240 | pcie_pme_init(); |
| 239 | pcie_dpc_init(); | 241 | pcie_dpc_init(); |
| 240 | pcie_hp_init(); | 242 | pcie_hp_init(); |
| 243 | pcie_bandwidth_notification_init(); | ||
| 241 | } | 244 | } |
| 242 | 245 | ||
| 243 | static int __init pcie_portdrv_init(void) | 246 | static int __init pcie_portdrv_init(void) |
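[Editor's note — quick arithmetic check, not part of the patch.] The new port_pci_ids[] entry matches the "060401h" class mentioned in its comment: PCI_CLASS_BRIDGE_PCI is 0x0604, and appending the subtractive-decode programming interface byte 0x01 gives the full 24-bit class code.

#include <stdio.h>

#define PCI_CLASS_BRIDGE_PCI 0x0604

int main(void)
{
	/* prints 060401, the subtractive-decode PCI-to-PCI bridge class */
	printf("%06x\n", (PCI_CLASS_BRIDGE_PCI << 8) | 0x01);
	return 0;
}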
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 257b9f6f2ebb..2ec0df04e0dc 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
| @@ -121,13 +121,13 @@ static u64 pci_size(u64 base, u64 maxbase, u64 mask) | |||
| 121 | * Get the lowest of them to find the decode size, and from that | 121 | * Get the lowest of them to find the decode size, and from that |
| 122 | * the extent. | 122 | * the extent. |
| 123 | */ | 123 | */ |
| 124 | size = (size & ~(size-1)) - 1; | 124 | size = size & ~(size-1); |
| 125 | 125 | ||
| 126 | /* | 126 | /* |
| 127 | * base == maxbase can be valid only if the BAR has already been | 127 | * base == maxbase can be valid only if the BAR has already been |
| 128 | * programmed with all 1s. | 128 | * programmed with all 1s. |
| 129 | */ | 129 | */ |
| 130 | if (base == maxbase && ((base | size) & mask) != mask) | 130 | if (base == maxbase && ((base | (size - 1)) & mask) != mask) |
| 131 | return 0; | 131 | return 0; |
| 132 | 132 | ||
| 133 | return size; | 133 | return size; |
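[Editor's note — worked example of the size decode, with invented register values.] After writing all ones to a BAR, the read-back value has its low "don't care" address bits forced to zero, and size & ~(size - 1) isolates the lowest set bit, which is the decode size. The hunk above changes pci_size() to return that size directly instead of size - 1, so callers now compute the resource end as start + size - 1.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t maxbase = 0xfffff000;	/* hypothetical BAR read-back after writing ~0 */
	uint64_t mask = ~0xfULL;	/* strip the low BAR type bits */
	uint64_t size = maxbase & mask;

	size = size & ~(size - 1);	/* lowest set bit == decode size */
	printf("decode size: %#llx\n", (unsigned long long)size);	/* 0x1000 */
	return 0;
}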
| @@ -278,7 +278,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
| 278 | /* Above 32-bit boundary; try to reallocate */ | 278 | /* Above 32-bit boundary; try to reallocate */ |
| 279 | res->flags |= IORESOURCE_UNSET; | 279 | res->flags |= IORESOURCE_UNSET; |
| 280 | res->start = 0; | 280 | res->start = 0; |
| 281 | res->end = sz64; | 281 | res->end = sz64 - 1; |
| 282 | pci_info(dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n", | 282 | pci_info(dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n", |
| 283 | pos, (unsigned long long)l64); | 283 | pos, (unsigned long long)l64); |
| 284 | goto out; | 284 | goto out; |
| @@ -286,7 +286,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, | |||
| 286 | } | 286 | } |
| 287 | 287 | ||
| 288 | region.start = l64; | 288 | region.start = l64; |
| 289 | region.end = l64 + sz64; | 289 | region.end = l64 + sz64 - 1; |
| 290 | 290 | ||
| 291 | pcibios_bus_to_resource(dev->bus, res, ®ion); | 291 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
| 292 | pcibios_resource_to_bus(dev->bus, &inverted_region, res); | 292 | pcibios_resource_to_bus(dev->bus, &inverted_region, res); |
| @@ -348,6 +348,57 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) | |||
| 348 | } | 348 | } |
| 349 | } | 349 | } |
| 350 | 350 | ||
| 351 | static void pci_read_bridge_windows(struct pci_dev *bridge) | ||
| 352 | { | ||
| 353 | u16 io; | ||
| 354 | u32 pmem, tmp; | ||
| 355 | |||
| 356 | pci_read_config_word(bridge, PCI_IO_BASE, &io); | ||
| 357 | if (!io) { | ||
| 358 | pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0); | ||
| 359 | pci_read_config_word(bridge, PCI_IO_BASE, &io); | ||
| 360 | pci_write_config_word(bridge, PCI_IO_BASE, 0x0); | ||
| 361 | } | ||
| 362 | if (io) | ||
| 363 | bridge->io_window = 1; | ||
| 364 | |||
| 365 | /* | ||
| 366 | * DECchip 21050 pass 2 errata: the bridge may miss an address | ||
| 367 | * disconnect boundary by one PCI data phase. Workaround: do not | ||
| 368 | * use prefetching on this device. | ||
| 369 | */ | ||
| 370 | if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) | ||
| 371 | return; | ||
| 372 | |||
| 373 | pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); | ||
| 374 | if (!pmem) { | ||
| 375 | pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, | ||
| 376 | 0xffe0fff0); | ||
| 377 | pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); | ||
| 378 | pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); | ||
| 379 | } | ||
| 380 | if (!pmem) | ||
| 381 | return; | ||
| 382 | |||
| 383 | bridge->pref_window = 1; | ||
| 384 | |||
| 385 | if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { | ||
| 386 | |||
| 387 | /* | ||
| 388 | * Bridge claims to have a 64-bit prefetchable memory | ||
| 389 | * window; verify that the upper bits are actually | ||
| 390 | * writable. | ||
| 391 | */ | ||
| 392 | pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem); | ||
| 393 | pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, | ||
| 394 | 0xffffffff); | ||
| 395 | pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp); | ||
| 396 | pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem); | ||
| 397 | if (tmp) | ||
| 398 | bridge->pref_64_window = 1; | ||
| 399 | } | ||
| 400 | } | ||
| 401 | |||
| 351 | static void pci_read_bridge_io(struct pci_bus *child) | 402 | static void pci_read_bridge_io(struct pci_bus *child) |
| 352 | { | 403 | { |
| 353 | struct pci_dev *dev = child->self; | 404 | struct pci_dev *dev = child->self; |
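[Editor's note — toy model of the probe pattern above.] pci_read_bridge_windows() decides whether a window exists by writing a test value to a base/limit register that may be hard-wired to zero, reading it back to see whether anything stuck, and then restoring the original value. The sketch below models that with a plain struct standing in for config space; fake_bridge, cfg_write() and has_io_window() are all invented for the illustration, only the 0xe0f0 probe value comes from the hunk.

#include <stdint.h>
#include <stdio.h>

struct fake_bridge {
	uint16_t io_base;	/* stays 0 if the bridge has no I/O window */
	int io_read_only;	/* models a hard-wired register */
};

static void cfg_write(struct fake_bridge *b, uint16_t val)
{
	if (!b->io_read_only)
		b->io_base = val;
}

static int has_io_window(struct fake_bridge *b)
{
	uint16_t io = b->io_base;

	if (!io) {
		cfg_write(b, 0xe0f0);	/* same probe value as the patch */
		io = b->io_base;	/* did the write stick? */
		cfg_write(b, 0x0);	/* restore */
	}
	return io != 0;
}

int main(void)
{
	struct fake_bridge with_window = { 0, 0 }, without_window = { 0, 1 };

	printf("io window: %d %d\n", has_io_window(&with_window),
	       has_io_window(&without_window));	/* 1 0 */
	return 0;
}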
| @@ -1728,9 +1779,6 @@ int pci_setup_device(struct pci_dev *dev) | |||
| 1728 | break; | 1779 | break; |
| 1729 | 1780 | ||
| 1730 | case PCI_HEADER_TYPE_BRIDGE: /* bridge header */ | 1781 | case PCI_HEADER_TYPE_BRIDGE: /* bridge header */ |
| 1731 | if (class != PCI_CLASS_BRIDGE_PCI) | ||
| 1732 | goto bad; | ||
| 1733 | |||
| 1734 | /* | 1782 | /* |
| 1735 | * The PCI-to-PCI bridge spec requires that subtractive | 1783 | * The PCI-to-PCI bridge spec requires that subtractive |
| 1736 | * decoding (i.e. transparent) bridge must have programming | 1784 | * decoding (i.e. transparent) bridge must have programming |
| @@ -1739,6 +1787,7 @@ int pci_setup_device(struct pci_dev *dev) | |||
| 1739 | pci_read_irq(dev); | 1787 | pci_read_irq(dev); |
| 1740 | dev->transparent = ((dev->class & 0xff) == 1); | 1788 | dev->transparent = ((dev->class & 0xff) == 1); |
| 1741 | pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); | 1789 | pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); |
| 1790 | pci_read_bridge_windows(dev); | ||
| 1742 | set_pcie_hotplug_bridge(dev); | 1791 | set_pcie_hotplug_bridge(dev); |
| 1743 | pos = pci_find_capability(dev, PCI_CAP_ID_SSVID); | 1792 | pos = pci_find_capability(dev, PCI_CAP_ID_SSVID); |
| 1744 | if (pos) { | 1793 | if (pos) { |
| @@ -1856,8 +1905,6 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp) | |||
| 1856 | pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, | 1905 | pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, |
| 1857 | hpp->latency_timer); | 1906 | hpp->latency_timer); |
| 1858 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); | 1907 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); |
| 1859 | if (hpp->enable_serr) | ||
| 1860 | pci_bctl |= PCI_BRIDGE_CTL_SERR; | ||
| 1861 | if (hpp->enable_perr) | 1908 | if (hpp->enable_perr) |
| 1862 | pci_bctl |= PCI_BRIDGE_CTL_PARITY; | 1909 | pci_bctl |= PCI_BRIDGE_CTL_PARITY; |
| 1863 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl); | 1910 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl); |
| @@ -2071,11 +2118,8 @@ static void pci_configure_ltr(struct pci_dev *dev) | |||
| 2071 | { | 2118 | { |
| 2072 | #ifdef CONFIG_PCIEASPM | 2119 | #ifdef CONFIG_PCIEASPM |
| 2073 | struct pci_host_bridge *host = pci_find_host_bridge(dev->bus); | 2120 | struct pci_host_bridge *host = pci_find_host_bridge(dev->bus); |
| 2074 | u32 cap; | ||
| 2075 | struct pci_dev *bridge; | 2121 | struct pci_dev *bridge; |
| 2076 | 2122 | u32 cap, ctl; | |
| 2077 | if (!host->native_ltr) | ||
| 2078 | return; | ||
| 2079 | 2123 | ||
| 2080 | if (!pci_is_pcie(dev)) | 2124 | if (!pci_is_pcie(dev)) |
| 2081 | return; | 2125 | return; |
| @@ -2084,22 +2128,35 @@ static void pci_configure_ltr(struct pci_dev *dev) | |||
| 2084 | if (!(cap & PCI_EXP_DEVCAP2_LTR)) | 2128 | if (!(cap & PCI_EXP_DEVCAP2_LTR)) |
| 2085 | return; | 2129 | return; |
| 2086 | 2130 | ||
| 2087 | /* | 2131 | pcie_capability_read_dword(dev, PCI_EXP_DEVCTL2, &ctl); |
| 2088 | * Software must not enable LTR in an Endpoint unless the Root | 2132 | if (ctl & PCI_EXP_DEVCTL2_LTR_EN) { |
| 2089 | * Complex and all intermediate Switches indicate support for LTR. | 2133 | if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) { |
| 2090 | * PCIe r3.1, sec 6.18. | 2134 | dev->ltr_path = 1; |
| 2091 | */ | 2135 | return; |
| 2092 | if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) | 2136 | } |
| 2093 | dev->ltr_path = 1; | 2137 | |
| 2094 | else { | ||
| 2095 | bridge = pci_upstream_bridge(dev); | 2138 | bridge = pci_upstream_bridge(dev); |
| 2096 | if (bridge && bridge->ltr_path) | 2139 | if (bridge && bridge->ltr_path) |
| 2097 | dev->ltr_path = 1; | 2140 | dev->ltr_path = 1; |
| 2141 | |||
| 2142 | return; | ||
| 2098 | } | 2143 | } |
| 2099 | 2144 | ||
| 2100 | if (dev->ltr_path) | 2145 | if (!host->native_ltr) |
| 2146 | return; | ||
| 2147 | |||
| 2148 | /* | ||
| 2149 | * Software must not enable LTR in an Endpoint unless the Root | ||
| 2150 | * Complex and all intermediate Switches indicate support for LTR. | ||
| 2151 | * PCIe r4.0, sec 6.18. | ||
| 2152 | */ | ||
| 2153 | if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT || | ||
| 2154 | ((bridge = pci_upstream_bridge(dev)) && | ||
| 2155 | bridge->ltr_path)) { | ||
| 2101 | pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, | 2156 | pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, |
| 2102 | PCI_EXP_DEVCTL2_LTR_EN); | 2157 | PCI_EXP_DEVCTL2_LTR_EN); |
| 2158 | dev->ltr_path = 1; | ||
| 2159 | } | ||
| 2103 | #endif | 2160 | #endif |
| 2104 | } | 2161 | } |
| 2105 | 2162 | ||
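[Editor's note — toy restatement of the ltr_path rule, not the kernel data structures.] The reworked pci_configure_ltr() only enables LTR on a device when its upstream bridge already has ltr_path set (or when the device is itself a root port), which matches the spec language quoted in the hunk. The sketch below expresses just that rule over an invented struct node hierarchy.

#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *upstream;	/* NULL for a root port */
	bool ltr_path;		/* LTR enabled on the path so far */
};

static bool may_enable_ltr(struct node *dev)
{
	struct node *bridge = dev->upstream;

	if (!bridge)		/* root ports start an LTR path themselves */
		return true;
	return bridge->ltr_path;
}

int main(void)
{
	struct node root = { NULL, true };
	struct node sw_ltr = { &root, true };
	struct node sw_no_ltr = { &root, false };
	struct node ep_ok = { &sw_ltr, false };
	struct node ep_blocked = { &sw_no_ltr, false };

	printf("%d %d\n", may_enable_ltr(&ep_ok),
	       may_enable_ltr(&ep_blocked));	/* 1 0 */
	return 0;
}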
| @@ -2129,6 +2186,24 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev) | |||
| 2129 | #endif | 2186 | #endif |
| 2130 | } | 2187 | } |
| 2131 | 2188 | ||
| 2189 | static void pci_configure_serr(struct pci_dev *dev) | ||
| 2190 | { | ||
| 2191 | u16 control; | ||
| 2192 | |||
| 2193 | if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { | ||
| 2194 | |||
| 2195 | /* | ||
| 2196 | * A bridge will not forward ERR_ messages coming from an | ||
| 2197 | * endpoint unless SERR# forwarding is enabled. | ||
| 2198 | */ | ||
| 2199 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &control); | ||
| 2200 | if (!(control & PCI_BRIDGE_CTL_SERR)) { | ||
| 2201 | control |= PCI_BRIDGE_CTL_SERR; | ||
| 2202 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, control); | ||
| 2203 | } | ||
| 2204 | } | ||
| 2205 | } | ||
| 2206 | |||
| 2132 | static void pci_configure_device(struct pci_dev *dev) | 2207 | static void pci_configure_device(struct pci_dev *dev) |
| 2133 | { | 2208 | { |
| 2134 | struct hotplug_params hpp; | 2209 | struct hotplug_params hpp; |
| @@ -2139,6 +2214,7 @@ static void pci_configure_device(struct pci_dev *dev) | |||
| 2139 | pci_configure_relaxed_ordering(dev); | 2214 | pci_configure_relaxed_ordering(dev); |
| 2140 | pci_configure_ltr(dev); | 2215 | pci_configure_ltr(dev); |
| 2141 | pci_configure_eetlp_prefix(dev); | 2216 | pci_configure_eetlp_prefix(dev); |
| 2217 | pci_configure_serr(dev); | ||
| 2142 | 2218 | ||
| 2143 | memset(&hpp, 0, sizeof(hpp)); | 2219 | memset(&hpp, 0, sizeof(hpp)); |
| 2144 | ret = pci_get_hp_params(dev, &hpp); | 2220 | ret = pci_get_hp_params(dev, &hpp); |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index b0a413f3f7ca..6bafd0fe0f0b 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -2138,7 +2138,7 @@ static void quirk_netmos(struct pci_dev *dev) | |||
| 2138 | if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM && | 2138 | if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM && |
| 2139 | dev->subsystem_device == 0x0299) | 2139 | dev->subsystem_device == 0x0299) |
| 2140 | return; | 2140 | return; |
| 2141 | /* else: fall through */ | 2141 | /* else, fall through */ |
| 2142 | case PCI_DEVICE_ID_NETMOS_9735: | 2142 | case PCI_DEVICE_ID_NETMOS_9735: |
| 2143 | case PCI_DEVICE_ID_NETMOS_9745: | 2143 | case PCI_DEVICE_ID_NETMOS_9745: |
| 2144 | case PCI_DEVICE_ID_NETMOS_9845: | 2144 | case PCI_DEVICE_ID_NETMOS_9845: |
| @@ -4519,6 +4519,8 @@ static const struct pci_dev_acs_enabled { | |||
| 4519 | /* QCOM QDF2xxx root ports */ | 4519 | /* QCOM QDF2xxx root ports */ |
| 4520 | { PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs }, | 4520 | { PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs }, |
| 4521 | { PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs }, | 4521 | { PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs }, |
| 4522 | /* HXT SD4800 root ports. The ACS design is same as QCOM QDF2xxx */ | ||
| 4523 | { PCI_VENDOR_ID_HXT, 0x0401, pci_quirk_qcom_rp_acs }, | ||
| 4522 | /* Intel PCH root ports */ | 4524 | /* Intel PCH root ports */ |
| 4523 | { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs }, | 4525 | { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs }, |
| 4524 | { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs }, | 4526 | { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs }, |
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index ed960436df5e..ec44a0f3a7ac 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
| @@ -735,58 +735,21 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i) | |||
| 735 | base/limit registers must be read-only and read as 0. */ | 735 | base/limit registers must be read-only and read as 0. */ |
| 736 | static void pci_bridge_check_ranges(struct pci_bus *bus) | 736 | static void pci_bridge_check_ranges(struct pci_bus *bus) |
| 737 | { | 737 | { |
| 738 | u16 io; | ||
| 739 | u32 pmem; | ||
| 740 | struct pci_dev *bridge = bus->self; | 738 | struct pci_dev *bridge = bus->self; |
| 741 | struct resource *b_res; | 739 | struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; |
| 742 | 740 | ||
| 743 | b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; | ||
| 744 | b_res[1].flags |= IORESOURCE_MEM; | 741 | b_res[1].flags |= IORESOURCE_MEM; |
| 745 | 742 | ||
| 746 | pci_read_config_word(bridge, PCI_IO_BASE, &io); | 743 | if (bridge->io_window) |
| 747 | if (!io) { | ||
| 748 | pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0); | ||
| 749 | pci_read_config_word(bridge, PCI_IO_BASE, &io); | ||
| 750 | pci_write_config_word(bridge, PCI_IO_BASE, 0x0); | ||
| 751 | } | ||
| 752 | if (io) | ||
| 753 | b_res[0].flags |= IORESOURCE_IO; | 744 | b_res[0].flags |= IORESOURCE_IO; |
| 754 | 745 | ||
| 755 | /* DECchip 21050 pass 2 errata: the bridge may miss an address | 746 | if (bridge->pref_window) { |
| 756 | disconnect boundary by one PCI data phase. | ||
| 757 | Workaround: do not use prefetching on this device. */ | ||
| 758 | if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) | ||
| 759 | return; | ||
| 760 | |||
| 761 | pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); | ||
| 762 | if (!pmem) { | ||
| 763 | pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, | ||
| 764 | 0xffe0fff0); | ||
| 765 | pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); | ||
| 766 | pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); | ||
| 767 | } | ||
| 768 | if (pmem) { | ||
| 769 | b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; | 747 | b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; |
| 770 | if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == | 748 | if (bridge->pref_64_window) { |
| 771 | PCI_PREF_RANGE_TYPE_64) { | ||
| 772 | b_res[2].flags |= IORESOURCE_MEM_64; | 749 | b_res[2].flags |= IORESOURCE_MEM_64; |
| 773 | b_res[2].flags |= PCI_PREF_RANGE_TYPE_64; | 750 | b_res[2].flags |= PCI_PREF_RANGE_TYPE_64; |
| 774 | } | 751 | } |
| 775 | } | 752 | } |
| 776 | |||
| 777 | /* double check if bridge does support 64 bit pref */ | ||
| 778 | if (b_res[2].flags & IORESOURCE_MEM_64) { | ||
| 779 | u32 mem_base_hi, tmp; | ||
| 780 | pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, | ||
| 781 | &mem_base_hi); | ||
| 782 | pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, | ||
| 783 | 0xffffffff); | ||
| 784 | pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp); | ||
| 785 | if (!tmp) | ||
| 786 | b_res[2].flags &= ~IORESOURCE_MEM_64; | ||
| 787 | pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, | ||
| 788 | mem_base_hi); | ||
| 789 | } | ||
| 790 | } | 753 | } |
| 791 | 754 | ||
| 792 | /* Helper function for sizing routines: find first available | 755 | /* Helper function for sizing routines: find first available |
| @@ -1223,12 +1186,12 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head) | |||
| 1223 | if (!b) | 1186 | if (!b) |
| 1224 | continue; | 1187 | continue; |
| 1225 | 1188 | ||
| 1226 | switch (dev->class >> 8) { | 1189 | switch (dev->hdr_type) { |
| 1227 | case PCI_CLASS_BRIDGE_CARDBUS: | 1190 | case PCI_HEADER_TYPE_CARDBUS: |
| 1228 | pci_bus_size_cardbus(b, realloc_head); | 1191 | pci_bus_size_cardbus(b, realloc_head); |
| 1229 | break; | 1192 | break; |
| 1230 | 1193 | ||
| 1231 | case PCI_CLASS_BRIDGE_PCI: | 1194 | case PCI_HEADER_TYPE_BRIDGE: |
| 1232 | default: | 1195 | default: |
| 1233 | __pci_bus_size_bridges(b, realloc_head); | 1196 | __pci_bus_size_bridges(b, realloc_head); |
| 1234 | break; | 1197 | break; |
| @@ -1239,12 +1202,12 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head) | |||
| 1239 | if (pci_is_root_bus(bus)) | 1202 | if (pci_is_root_bus(bus)) |
| 1240 | return; | 1203 | return; |
| 1241 | 1204 | ||
| 1242 | switch (bus->self->class >> 8) { | 1205 | switch (bus->self->hdr_type) { |
| 1243 | case PCI_CLASS_BRIDGE_CARDBUS: | 1206 | case PCI_HEADER_TYPE_CARDBUS: |
| 1244 | /* don't size cardbuses yet. */ | 1207 | /* don't size cardbuses yet. */ |
| 1245 | break; | 1208 | break; |
| 1246 | 1209 | ||
| 1247 | case PCI_CLASS_BRIDGE_PCI: | 1210 | case PCI_HEADER_TYPE_BRIDGE: |
| 1248 | pci_bridge_check_ranges(bus); | 1211 | pci_bridge_check_ranges(bus); |
| 1249 | if (bus->self->is_hotplug_bridge) { | 1212 | if (bus->self->is_hotplug_bridge) { |
| 1250 | additional_io_size = pci_hotplug_io_size; | 1213 | additional_io_size = pci_hotplug_io_size; |
| @@ -1393,13 +1356,13 @@ void __pci_bus_assign_resources(const struct pci_bus *bus, | |||
| 1393 | 1356 | ||
| 1394 | __pci_bus_assign_resources(b, realloc_head, fail_head); | 1357 | __pci_bus_assign_resources(b, realloc_head, fail_head); |
| 1395 | 1358 | ||
| 1396 | switch (dev->class >> 8) { | 1359 | switch (dev->hdr_type) { |
| 1397 | case PCI_CLASS_BRIDGE_PCI: | 1360 | case PCI_HEADER_TYPE_BRIDGE: |
| 1398 | if (!pci_is_enabled(dev)) | 1361 | if (!pci_is_enabled(dev)) |
| 1399 | pci_setup_bridge(b); | 1362 | pci_setup_bridge(b); |
| 1400 | break; | 1363 | break; |
| 1401 | 1364 | ||
| 1402 | case PCI_CLASS_BRIDGE_CARDBUS: | 1365 | case PCI_HEADER_TYPE_CARDBUS: |
| 1403 | pci_setup_cardbus(b); | 1366 | pci_setup_cardbus(b); |
| 1404 | break; | 1367 | break; |
| 1405 | 1368 | ||
