diff options
Diffstat (limited to 'drivers')
27 files changed, 850 insertions, 159 deletions
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index bfd4f7c3b1d8..1b49e940a318 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c | |||
@@ -660,8 +660,8 @@ static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev, | |||
660 | } | 660 | } |
661 | 661 | ||
662 | /* | 662 | /* |
663 | * Look for aliases to or from the given device for exisiting groups. The | 663 | * Look for aliases to or from the given device for existing groups. DMA |
664 | * dma_alias_devfn only supports aliases on the same bus, therefore the search | 664 | * aliases are only supported on the same bus, therefore the search |
665 | * space is quite small (especially since we're really only looking at pcie | 665 | * space is quite small (especially since we're really only looking at pcie |
666 | * device, and therefore only expect multiple slots on the root complex or | 666 | * device, and therefore only expect multiple slots on the root complex or |
667 | * downstream switch ports). It's conceivable though that a pair of | 667 | * downstream switch ports). It's conceivable though that a pair of |
@@ -686,11 +686,7 @@ static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, | |||
686 | continue; | 686 | continue; |
687 | 687 | ||
688 | /* We alias them or they alias us */ | 688 | /* We alias them or they alias us */ |
689 | if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) && | 689 | if (pci_devs_are_dma_aliases(pdev, tmp)) { |
690 | pdev->dma_alias_devfn == tmp->devfn) || | ||
691 | ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) && | ||
692 | tmp->dma_alias_devfn == pdev->devfn)) { | ||
693 | |||
694 | group = get_pci_alias_group(tmp, devfns); | 690 | group = get_pci_alias_group(tmp, devfns); |
695 | if (group) { | 691 | if (group) { |
696 | pci_dev_put(tmp); | 692 | pci_dev_put(tmp); |
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 7a0780d56d2d..bd6f11aaefd9 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig | |||
@@ -72,6 +72,8 @@ config PCI_RCAR_GEN2 | |||
72 | config PCI_RCAR_GEN2_PCIE | 72 | config PCI_RCAR_GEN2_PCIE |
73 | bool "Renesas R-Car PCIe controller" | 73 | bool "Renesas R-Car PCIe controller" |
74 | depends on ARCH_RENESAS || (ARM && COMPILE_TEST) | 74 | depends on ARCH_RENESAS || (ARM && COMPILE_TEST) |
75 | select PCI_MSI | ||
76 | select PCI_MSI_IRQ_DOMAIN | ||
75 | help | 77 | help |
76 | Say Y here if you want PCIe controller support on R-Car Gen2 SoCs. | 78 | Say Y here if you want PCIe controller support on R-Car Gen2 SoCs. |
77 | 79 | ||
@@ -231,4 +233,15 @@ config PCI_HOST_THUNDER_ECAM | |||
231 | help | 233 | help |
232 | Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs. | 234 | Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs. |
233 | 235 | ||
236 | config PCIE_ARMADA_8K | ||
237 | bool "Marvell Armada-8K PCIe controller" | ||
238 | depends on ARCH_MVEBU | ||
239 | select PCIE_DW | ||
240 | select PCIEPORTBUS | ||
241 | help | ||
242 | Say Y here if you want to enable PCIe controller support on | ||
243 | Armada-8K SoCs. The PCIe controller on Armada-8K is based on | ||
244 | Designware hardware and therefore the driver re-uses the | ||
245 | Designware core functions to implement the driver. | ||
246 | |||
234 | endmenu | 247 | endmenu |
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index d85b5faf9bbc..a6f85e3987c0 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile | |||
@@ -28,3 +28,4 @@ obj-$(CONFIG_PCI_HISI) += pcie-hisi.o | |||
28 | obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o | 28 | obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o |
29 | obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o | 29 | obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o |
30 | obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o | 30 | obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o |
31 | obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o | ||
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index ed651baa7c50..328c653d7503 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c | |||
@@ -2268,11 +2268,6 @@ static int hv_pci_remove(struct hv_device *hdev) | |||
2268 | 2268 | ||
2269 | hbus = hv_get_drvdata(hdev); | 2269 | hbus = hv_get_drvdata(hdev); |
2270 | 2270 | ||
2271 | ret = hv_send_resources_released(hdev); | ||
2272 | if (ret) | ||
2273 | dev_err(&hdev->device, | ||
2274 | "Couldn't send resources released packet(s)\n"); | ||
2275 | |||
2276 | memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet)); | 2271 | memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet)); |
2277 | init_completion(&comp_pkt.host_event); | 2272 | init_completion(&comp_pkt.host_event); |
2278 | pkt.teardown_packet.completion_func = hv_pci_generic_compl; | 2273 | pkt.teardown_packet.completion_func = hv_pci_generic_compl; |
@@ -2295,6 +2290,11 @@ static int hv_pci_remove(struct hv_device *hdev) | |||
2295 | pci_unlock_rescan_remove(); | 2290 | pci_unlock_rescan_remove(); |
2296 | } | 2291 | } |
2297 | 2292 | ||
2293 | ret = hv_send_resources_released(hdev); | ||
2294 | if (ret) | ||
2295 | dev_err(&hdev->device, | ||
2296 | "Couldn't send resources released packet(s)\n"); | ||
2297 | |||
2298 | vmbus_close(hdev->channel); | 2298 | vmbus_close(hdev->channel); |
2299 | 2299 | ||
2300 | /* Delete any children which might still exist. */ | 2300 | /* Delete any children which might still exist. */ |
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c index eb5a2755a164..b741a36a67f3 100644 --- a/drivers/pci/host/pci-imx6.c +++ b/drivers/pci/host/pci-imx6.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> | 19 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/of_gpio.h> | 21 | #include <linux/of_gpio.h> |
22 | #include <linux/of_device.h> | ||
22 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
23 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
24 | #include <linux/regmap.h> | 25 | #include <linux/regmap.h> |
@@ -31,19 +32,29 @@ | |||
31 | 32 | ||
32 | #define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp) | 33 | #define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp) |
33 | 34 | ||
35 | enum imx6_pcie_variants { | ||
36 | IMX6Q, | ||
37 | IMX6SX, | ||
38 | IMX6QP, | ||
39 | }; | ||
40 | |||
34 | struct imx6_pcie { | 41 | struct imx6_pcie { |
35 | struct gpio_desc *reset_gpio; | 42 | int reset_gpio; |
43 | bool gpio_active_high; | ||
36 | struct clk *pcie_bus; | 44 | struct clk *pcie_bus; |
37 | struct clk *pcie_phy; | 45 | struct clk *pcie_phy; |
46 | struct clk *pcie_inbound_axi; | ||
38 | struct clk *pcie; | 47 | struct clk *pcie; |
39 | struct pcie_port pp; | 48 | struct pcie_port pp; |
40 | struct regmap *iomuxc_gpr; | 49 | struct regmap *iomuxc_gpr; |
50 | enum imx6_pcie_variants variant; | ||
41 | void __iomem *mem_base; | 51 | void __iomem *mem_base; |
42 | u32 tx_deemph_gen1; | 52 | u32 tx_deemph_gen1; |
43 | u32 tx_deemph_gen2_3p5db; | 53 | u32 tx_deemph_gen2_3p5db; |
44 | u32 tx_deemph_gen2_6db; | 54 | u32 tx_deemph_gen2_6db; |
45 | u32 tx_swing_full; | 55 | u32 tx_swing_full; |
46 | u32 tx_swing_low; | 56 | u32 tx_swing_low; |
57 | int link_gen; | ||
47 | }; | 58 | }; |
48 | 59 | ||
49 | /* PCIe Root Complex registers (memory-mapped) */ | 60 | /* PCIe Root Complex registers (memory-mapped) */ |
@@ -236,37 +247,93 @@ static int imx6_pcie_assert_core_reset(struct pcie_port *pp) | |||
236 | struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); | 247 | struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); |
237 | u32 val, gpr1, gpr12; | 248 | u32 val, gpr1, gpr12; |
238 | 249 | ||
239 | /* | 250 | switch (imx6_pcie->variant) { |
240 | * If the bootloader already enabled the link we need some special | 251 | case IMX6SX: |
241 | * handling to get the core back into a state where it is safe to | 252 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, |
242 | * touch it for configuration. As there is no dedicated reset signal | 253 | IMX6SX_GPR12_PCIE_TEST_POWERDOWN, |
243 | * wired up for MX6QDL, we need to manually force LTSSM into "detect" | 254 | IMX6SX_GPR12_PCIE_TEST_POWERDOWN); |
244 | * state before completely disabling LTSSM, which is a prerequisite | 255 | /* Force PCIe PHY reset */ |
245 | * for core configuration. | 256 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, |
246 | * | 257 | IMX6SX_GPR5_PCIE_BTNRST_RESET, |
247 | * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong | 258 | IMX6SX_GPR5_PCIE_BTNRST_RESET); |
248 | * indication that the bootloader activated the link. | 259 | break; |
249 | */ | 260 | case IMX6QP: |
250 | regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1); | 261 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, |
251 | regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12); | 262 | IMX6Q_GPR1_PCIE_SW_RST, |
263 | IMX6Q_GPR1_PCIE_SW_RST); | ||
264 | break; | ||
265 | case IMX6Q: | ||
266 | /* | ||
267 | * If the bootloader already enabled the link we need some | ||
268 | * special handling to get the core back into a state where | ||
269 | * it is safe to touch it for configuration. As there is | ||
270 | * no dedicated reset signal wired up for MX6QDL, we need | ||
271 | * to manually force LTSSM into "detect" state before | ||
272 | * completely disabling LTSSM, which is a prerequisite for | ||
273 | * core configuration. | ||
274 | * | ||
275 | * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we | ||
276 | * have a strong indication that the bootloader activated | ||
277 | * the link. | ||
278 | */ | ||
279 | regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1); | ||
280 | regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12); | ||
281 | |||
282 | if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) && | ||
283 | (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) { | ||
284 | val = readl(pp->dbi_base + PCIE_PL_PFLR); | ||
285 | val &= ~PCIE_PL_PFLR_LINK_STATE_MASK; | ||
286 | val |= PCIE_PL_PFLR_FORCE_LINK; | ||
287 | writel(val, pp->dbi_base + PCIE_PL_PFLR); | ||
288 | |||
289 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
290 | IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); | ||
291 | } | ||
292 | |||
293 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | ||
294 | IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18); | ||
295 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | ||
296 | IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); | ||
297 | break; | ||
298 | } | ||
299 | |||
300 | return 0; | ||
301 | } | ||
252 | 302 | ||
253 | if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) && | 303 | static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) |
254 | (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) { | 304 | { |
255 | val = readl(pp->dbi_base + PCIE_PL_PFLR); | 305 | struct pcie_port *pp = &imx6_pcie->pp; |
256 | val &= ~PCIE_PL_PFLR_LINK_STATE_MASK; | 306 | int ret = 0; |
257 | val |= PCIE_PL_PFLR_FORCE_LINK; | 307 | |
258 | writel(val, pp->dbi_base + PCIE_PL_PFLR); | 308 | switch (imx6_pcie->variant) { |
309 | case IMX6SX: | ||
310 | ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi); | ||
311 | if (ret) { | ||
312 | dev_err(pp->dev, "unable to enable pcie_axi clock\n"); | ||
313 | break; | ||
314 | } | ||
259 | 315 | ||
260 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | 316 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, |
261 | IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); | 317 | IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0); |
318 | break; | ||
319 | case IMX6QP: /* FALLTHROUGH */ | ||
320 | case IMX6Q: | ||
321 | /* power up core phy and enable ref clock */ | ||
322 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | ||
323 | IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18); | ||
324 | /* | ||
325 | * the async reset input need ref clock to sync internally, | ||
326 | * when the ref clock comes after reset, internal synced | ||
327 | * reset time is too short, cannot meet the requirement. | ||
328 | * add one ~10us delay here. | ||
329 | */ | ||
330 | udelay(10); | ||
331 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | ||
332 | IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); | ||
333 | break; | ||
262 | } | 334 | } |
263 | 335 | ||
264 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | 336 | return ret; |
265 | IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18); | ||
266 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | ||
267 | IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); | ||
268 | |||
269 | return 0; | ||
270 | } | 337 | } |
271 | 338 | ||
272 | static int imx6_pcie_deassert_core_reset(struct pcie_port *pp) | 339 | static int imx6_pcie_deassert_core_reset(struct pcie_port *pp) |
@@ -292,43 +359,60 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp) | |||
292 | goto err_pcie; | 359 | goto err_pcie; |
293 | } | 360 | } |
294 | 361 | ||
295 | /* power up core phy and enable ref clock */ | 362 | ret = imx6_pcie_enable_ref_clk(imx6_pcie); |
296 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | 363 | if (ret) { |
297 | IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18); | 364 | dev_err(pp->dev, "unable to enable pcie ref clock\n"); |
298 | /* | 365 | goto err_ref_clk; |
299 | * the async reset input need ref clock to sync internally, | 366 | } |
300 | * when the ref clock comes after reset, internal synced | ||
301 | * reset time is too short, cannot meet the requirement. | ||
302 | * add one ~10us delay here. | ||
303 | */ | ||
304 | udelay(10); | ||
305 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | ||
306 | IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); | ||
307 | 367 | ||
308 | /* allow the clocks to stabilize */ | 368 | /* allow the clocks to stabilize */ |
309 | usleep_range(200, 500); | 369 | usleep_range(200, 500); |
310 | 370 | ||
311 | /* Some boards don't have PCIe reset GPIO. */ | 371 | /* Some boards don't have PCIe reset GPIO. */ |
312 | if (imx6_pcie->reset_gpio) { | 372 | if (gpio_is_valid(imx6_pcie->reset_gpio)) { |
313 | gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0); | 373 | gpio_set_value_cansleep(imx6_pcie->reset_gpio, |
374 | imx6_pcie->gpio_active_high); | ||
314 | msleep(100); | 375 | msleep(100); |
315 | gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1); | 376 | gpio_set_value_cansleep(imx6_pcie->reset_gpio, |
377 | !imx6_pcie->gpio_active_high); | ||
316 | } | 378 | } |
379 | |||
380 | switch (imx6_pcie->variant) { | ||
381 | case IMX6SX: | ||
382 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, | ||
383 | IMX6SX_GPR5_PCIE_BTNRST_RESET, 0); | ||
384 | break; | ||
385 | case IMX6QP: | ||
386 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | ||
387 | IMX6Q_GPR1_PCIE_SW_RST, 0); | ||
388 | |||
389 | usleep_range(200, 500); | ||
390 | break; | ||
391 | case IMX6Q: /* Nothing to do */ | ||
392 | break; | ||
393 | } | ||
394 | |||
317 | return 0; | 395 | return 0; |
318 | 396 | ||
397 | err_ref_clk: | ||
398 | clk_disable_unprepare(imx6_pcie->pcie); | ||
319 | err_pcie: | 399 | err_pcie: |
320 | clk_disable_unprepare(imx6_pcie->pcie_bus); | 400 | clk_disable_unprepare(imx6_pcie->pcie_bus); |
321 | err_pcie_bus: | 401 | err_pcie_bus: |
322 | clk_disable_unprepare(imx6_pcie->pcie_phy); | 402 | clk_disable_unprepare(imx6_pcie->pcie_phy); |
323 | err_pcie_phy: | 403 | err_pcie_phy: |
324 | return ret; | 404 | return ret; |
325 | |||
326 | } | 405 | } |
327 | 406 | ||
328 | static void imx6_pcie_init_phy(struct pcie_port *pp) | 407 | static void imx6_pcie_init_phy(struct pcie_port *pp) |
329 | { | 408 | { |
330 | struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); | 409 | struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); |
331 | 410 | ||
411 | if (imx6_pcie->variant == IMX6SX) | ||
412 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
413 | IMX6SX_GPR12_PCIE_RX_EQ_MASK, | ||
414 | IMX6SX_GPR12_PCIE_RX_EQ_2); | ||
415 | |||
332 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | 416 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, |
333 | IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); | 417 | IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); |
334 | 418 | ||
@@ -417,11 +501,15 @@ static int imx6_pcie_establish_link(struct pcie_port *pp) | |||
417 | goto err_reset_phy; | 501 | goto err_reset_phy; |
418 | } | 502 | } |
419 | 503 | ||
420 | /* Allow Gen2 mode after the link is up. */ | 504 | if (imx6_pcie->link_gen == 2) { |
421 | tmp = readl(pp->dbi_base + PCIE_RC_LCR); | 505 | /* Allow Gen2 mode after the link is up. */ |
422 | tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; | 506 | tmp = readl(pp->dbi_base + PCIE_RC_LCR); |
423 | tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2; | 507 | tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; |
424 | writel(tmp, pp->dbi_base + PCIE_RC_LCR); | 508 | tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2; |
509 | writel(tmp, pp->dbi_base + PCIE_RC_LCR); | ||
510 | } else { | ||
511 | dev_info(pp->dev, "Link: Gen2 disabled\n"); | ||
512 | } | ||
425 | 513 | ||
426 | /* | 514 | /* |
427 | * Start Directed Speed Change so the best possible speed both link | 515 | * Start Directed Speed Change so the best possible speed both link |
@@ -445,8 +533,7 @@ static int imx6_pcie_establish_link(struct pcie_port *pp) | |||
445 | } | 533 | } |
446 | 534 | ||
447 | tmp = readl(pp->dbi_base + PCIE_RC_LCSR); | 535 | tmp = readl(pp->dbi_base + PCIE_RC_LCSR); |
448 | dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf); | 536 | dev_info(pp->dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf); |
449 | |||
450 | return 0; | 537 | return 0; |
451 | 538 | ||
452 | err_reset_phy: | 539 | err_reset_phy: |
@@ -523,6 +610,7 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) | |||
523 | { | 610 | { |
524 | struct imx6_pcie *imx6_pcie; | 611 | struct imx6_pcie *imx6_pcie; |
525 | struct pcie_port *pp; | 612 | struct pcie_port *pp; |
613 | struct device_node *np = pdev->dev.of_node; | ||
526 | struct resource *dbi_base; | 614 | struct resource *dbi_base; |
527 | struct device_node *node = pdev->dev.of_node; | 615 | struct device_node *node = pdev->dev.of_node; |
528 | int ret; | 616 | int ret; |
@@ -534,6 +622,9 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) | |||
534 | pp = &imx6_pcie->pp; | 622 | pp = &imx6_pcie->pp; |
535 | pp->dev = &pdev->dev; | 623 | pp->dev = &pdev->dev; |
536 | 624 | ||
625 | imx6_pcie->variant = | ||
626 | (enum imx6_pcie_variants)of_device_get_match_data(&pdev->dev); | ||
627 | |||
537 | /* Added for PCI abort handling */ | 628 | /* Added for PCI abort handling */ |
538 | hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0, | 629 | hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0, |
539 | "imprecise external abort"); | 630 | "imprecise external abort"); |
@@ -544,8 +635,20 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) | |||
544 | return PTR_ERR(pp->dbi_base); | 635 | return PTR_ERR(pp->dbi_base); |
545 | 636 | ||
546 | /* Fetch GPIOs */ | 637 | /* Fetch GPIOs */ |
547 | imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset", | 638 | imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); |
548 | GPIOD_OUT_LOW); | 639 | imx6_pcie->gpio_active_high = of_property_read_bool(np, |
640 | "reset-gpio-active-high"); | ||
641 | if (gpio_is_valid(imx6_pcie->reset_gpio)) { | ||
642 | ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio, | ||
643 | imx6_pcie->gpio_active_high ? | ||
644 | GPIOF_OUT_INIT_HIGH : | ||
645 | GPIOF_OUT_INIT_LOW, | ||
646 | "PCIe reset"); | ||
647 | if (ret) { | ||
648 | dev_err(&pdev->dev, "unable to get reset gpio\n"); | ||
649 | return ret; | ||
650 | } | ||
651 | } | ||
549 | 652 | ||
550 | /* Fetch clocks */ | 653 | /* Fetch clocks */ |
551 | imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy"); | 654 | imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy"); |
@@ -569,6 +672,16 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) | |||
569 | return PTR_ERR(imx6_pcie->pcie); | 672 | return PTR_ERR(imx6_pcie->pcie); |
570 | } | 673 | } |
571 | 674 | ||
675 | if (imx6_pcie->variant == IMX6SX) { | ||
676 | imx6_pcie->pcie_inbound_axi = devm_clk_get(&pdev->dev, | ||
677 | "pcie_inbound_axi"); | ||
678 | if (IS_ERR(imx6_pcie->pcie_inbound_axi)) { | ||
679 | dev_err(&pdev->dev, | ||
680 | "pcie_incbound_axi clock missing or invalid\n"); | ||
681 | return PTR_ERR(imx6_pcie->pcie_inbound_axi); | ||
682 | } | ||
683 | } | ||
684 | |||
572 | /* Grab GPR config register range */ | 685 | /* Grab GPR config register range */ |
573 | imx6_pcie->iomuxc_gpr = | 686 | imx6_pcie->iomuxc_gpr = |
574 | syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); | 687 | syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); |
@@ -598,6 +711,12 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) | |||
598 | &imx6_pcie->tx_swing_low)) | 711 | &imx6_pcie->tx_swing_low)) |
599 | imx6_pcie->tx_swing_low = 127; | 712 | imx6_pcie->tx_swing_low = 127; |
600 | 713 | ||
714 | /* Limit link speed */ | ||
715 | ret = of_property_read_u32(pp->dev->of_node, "fsl,max-link-speed", | ||
716 | &imx6_pcie->link_gen); | ||
717 | if (ret) | ||
718 | imx6_pcie->link_gen = 1; | ||
719 | |||
601 | ret = imx6_add_pcie_port(pp, pdev); | 720 | ret = imx6_add_pcie_port(pp, pdev); |
602 | if (ret < 0) | 721 | if (ret < 0) |
603 | return ret; | 722 | return ret; |
@@ -615,7 +734,9 @@ static void imx6_pcie_shutdown(struct platform_device *pdev) | |||
615 | } | 734 | } |
616 | 735 | ||
617 | static const struct of_device_id imx6_pcie_of_match[] = { | 736 | static const struct of_device_id imx6_pcie_of_match[] = { |
618 | { .compatible = "fsl,imx6q-pcie", }, | 737 | { .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, }, |
738 | { .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, }, | ||
739 | { .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, }, | ||
619 | {}, | 740 | {}, |
620 | }; | 741 | }; |
621 | MODULE_DEVICE_TABLE(of, imx6_pcie_of_match); | 742 | MODULE_DEVICE_TABLE(of, imx6_pcie_of_match); |
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c index 6153853ca9c3..41515092eb0d 100644 --- a/drivers/pci/host/pci-keystone-dw.c +++ b/drivers/pci/host/pci-keystone-dw.c | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #include <linux/irq.h> | 15 | #include <linux/irq.h> |
16 | #include <linux/irqdomain.h> | 16 | #include <linux/irqdomain.h> |
17 | #include <linux/irqreturn.h> | ||
17 | #include <linux/module.h> | 18 | #include <linux/module.h> |
18 | #include <linux/of.h> | 19 | #include <linux/of.h> |
19 | #include <linux/of_pci.h> | 20 | #include <linux/of_pci.h> |
@@ -53,6 +54,21 @@ | |||
53 | #define IRQ_STATUS 0x184 | 54 | #define IRQ_STATUS 0x184 |
54 | #define MSI_IRQ_OFFSET 4 | 55 | #define MSI_IRQ_OFFSET 4 |
55 | 56 | ||
57 | /* Error IRQ bits */ | ||
58 | #define ERR_AER BIT(5) /* ECRC error */ | ||
59 | #define ERR_AXI BIT(4) /* AXI tag lookup fatal error */ | ||
60 | #define ERR_CORR BIT(3) /* Correctable error */ | ||
61 | #define ERR_NONFATAL BIT(2) /* Non-fatal error */ | ||
62 | #define ERR_FATAL BIT(1) /* Fatal error */ | ||
63 | #define ERR_SYS BIT(0) /* System (fatal, non-fatal, or correctable) */ | ||
64 | #define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \ | ||
65 | ERR_NONFATAL | ERR_FATAL | ERR_SYS) | ||
66 | #define ERR_FATAL_IRQ (ERR_FATAL | ERR_AXI) | ||
67 | #define ERR_IRQ_STATUS_RAW 0x1c0 | ||
68 | #define ERR_IRQ_STATUS 0x1c4 | ||
69 | #define ERR_IRQ_ENABLE_SET 0x1c8 | ||
70 | #define ERR_IRQ_ENABLE_CLR 0x1cc | ||
71 | |||
56 | /* Config space registers */ | 72 | /* Config space registers */ |
57 | #define DEBUG0 0x728 | 73 | #define DEBUG0 0x728 |
58 | 74 | ||
@@ -243,6 +259,28 @@ void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset) | |||
243 | writel(offset, ks_pcie->va_app_base + IRQ_EOI); | 259 | writel(offset, ks_pcie->va_app_base + IRQ_EOI); |
244 | } | 260 | } |
245 | 261 | ||
262 | void ks_dw_pcie_enable_error_irq(void __iomem *reg_base) | ||
263 | { | ||
264 | writel(ERR_IRQ_ALL, reg_base + ERR_IRQ_ENABLE_SET); | ||
265 | } | ||
266 | |||
267 | irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev, | ||
268 | void __iomem *reg_base) | ||
269 | { | ||
270 | u32 status; | ||
271 | |||
272 | status = readl(reg_base + ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL; | ||
273 | if (!status) | ||
274 | return IRQ_NONE; | ||
275 | |||
276 | if (status & ERR_FATAL_IRQ) | ||
277 | dev_err(dev, "fatal error (status %#010x)\n", status); | ||
278 | |||
279 | /* Ack the IRQ; status bits are RW1C */ | ||
280 | writel(status, reg_base + ERR_IRQ_STATUS); | ||
281 | return IRQ_HANDLED; | ||
282 | } | ||
283 | |||
246 | static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d) | 284 | static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d) |
247 | { | 285 | { |
248 | } | 286 | } |
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c index b71f55bb0315..6b8301ef21ca 100644 --- a/drivers/pci/host/pci-keystone.c +++ b/drivers/pci/host/pci-keystone.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/irqchip/chained_irq.h> | 15 | #include <linux/irqchip/chained_irq.h> |
16 | #include <linux/clk.h> | 16 | #include <linux/clk.h> |
17 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
18 | #include <linux/interrupt.h> | ||
18 | #include <linux/irqdomain.h> | 19 | #include <linux/irqdomain.h> |
19 | #include <linux/module.h> | 20 | #include <linux/module.h> |
20 | #include <linux/msi.h> | 21 | #include <linux/msi.h> |
@@ -159,7 +160,7 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) | |||
159 | static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, | 160 | static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, |
160 | char *controller, int *num_irqs) | 161 | char *controller, int *num_irqs) |
161 | { | 162 | { |
162 | int temp, max_host_irqs, legacy = 1, *host_irqs, ret = -EINVAL; | 163 | int temp, max_host_irqs, legacy = 1, *host_irqs; |
163 | struct device *dev = ks_pcie->pp.dev; | 164 | struct device *dev = ks_pcie->pp.dev; |
164 | struct device_node *np_pcie = dev->of_node, **np_temp; | 165 | struct device_node *np_pcie = dev->of_node, **np_temp; |
165 | 166 | ||
@@ -180,11 +181,15 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, | |||
180 | *np_temp = of_find_node_by_name(np_pcie, controller); | 181 | *np_temp = of_find_node_by_name(np_pcie, controller); |
181 | if (!(*np_temp)) { | 182 | if (!(*np_temp)) { |
182 | dev_err(dev, "Node for %s is absent\n", controller); | 183 | dev_err(dev, "Node for %s is absent\n", controller); |
183 | goto out; | 184 | return -EINVAL; |
184 | } | 185 | } |
186 | |||
185 | temp = of_irq_count(*np_temp); | 187 | temp = of_irq_count(*np_temp); |
186 | if (!temp) | 188 | if (!temp) { |
187 | goto out; | 189 | dev_err(dev, "No IRQ entries in %s\n", controller); |
190 | return -EINVAL; | ||
191 | } | ||
192 | |||
188 | if (temp > max_host_irqs) | 193 | if (temp > max_host_irqs) |
189 | dev_warn(dev, "Too many %s interrupts defined %u\n", | 194 | dev_warn(dev, "Too many %s interrupts defined %u\n", |
190 | (legacy ? "legacy" : "MSI"), temp); | 195 | (legacy ? "legacy" : "MSI"), temp); |
@@ -198,12 +203,13 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, | |||
198 | if (!host_irqs[temp]) | 203 | if (!host_irqs[temp]) |
199 | break; | 204 | break; |
200 | } | 205 | } |
206 | |||
201 | if (temp) { | 207 | if (temp) { |
202 | *num_irqs = temp; | 208 | *num_irqs = temp; |
203 | ret = 0; | 209 | return 0; |
204 | } | 210 | } |
205 | out: | 211 | |
206 | return ret; | 212 | return -EINVAL; |
207 | } | 213 | } |
208 | 214 | ||
209 | static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) | 215 | static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) |
@@ -226,6 +232,9 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) | |||
226 | ks_pcie); | 232 | ks_pcie); |
227 | } | 233 | } |
228 | } | 234 | } |
235 | |||
236 | if (ks_pcie->error_irq > 0) | ||
237 | ks_dw_pcie_enable_error_irq(ks_pcie->va_app_base); | ||
229 | } | 238 | } |
230 | 239 | ||
231 | /* | 240 | /* |
@@ -289,6 +298,14 @@ static struct pcie_host_ops keystone_pcie_host_ops = { | |||
289 | .scan_bus = ks_dw_pcie_v3_65_scan_bus, | 298 | .scan_bus = ks_dw_pcie_v3_65_scan_bus, |
290 | }; | 299 | }; |
291 | 300 | ||
301 | static irqreturn_t pcie_err_irq_handler(int irq, void *priv) | ||
302 | { | ||
303 | struct keystone_pcie *ks_pcie = priv; | ||
304 | |||
305 | return ks_dw_pcie_handle_error_irq(ks_pcie->pp.dev, | ||
306 | ks_pcie->va_app_base); | ||
307 | } | ||
308 | |||
292 | static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, | 309 | static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, |
293 | struct platform_device *pdev) | 310 | struct platform_device *pdev) |
294 | { | 311 | { |
@@ -309,6 +326,22 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, | |||
309 | return ret; | 326 | return ret; |
310 | } | 327 | } |
311 | 328 | ||
329 | /* | ||
330 | * Index 0 is the platform interrupt for error interrupt | ||
331 | * from RC. This is optional. | ||
332 | */ | ||
333 | ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0); | ||
334 | if (ks_pcie->error_irq <= 0) | ||
335 | dev_info(&pdev->dev, "no error IRQ defined\n"); | ||
336 | else { | ||
337 | if (request_irq(ks_pcie->error_irq, pcie_err_irq_handler, | ||
338 | IRQF_SHARED, "pcie-error-irq", ks_pcie) < 0) { | ||
339 | dev_err(&pdev->dev, "failed to request error IRQ %d\n", | ||
340 | ks_pcie->error_irq); | ||
341 | return ret; | ||
342 | } | ||
343 | } | ||
344 | |||
312 | pp->root_bus_nr = -1; | 345 | pp->root_bus_nr = -1; |
313 | pp->ops = &keystone_pcie_host_ops; | 346 | pp->ops = &keystone_pcie_host_ops; |
314 | ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np); | 347 | ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np); |
@@ -317,7 +350,7 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, | |||
317 | return ret; | 350 | return ret; |
318 | } | 351 | } |
319 | 352 | ||
320 | return ret; | 353 | return 0; |
321 | } | 354 | } |
322 | 355 | ||
323 | static const struct of_device_id ks_pcie_of_match[] = { | 356 | static const struct of_device_id ks_pcie_of_match[] = { |
@@ -346,7 +379,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev) | |||
346 | struct resource *res; | 379 | struct resource *res; |
347 | void __iomem *reg_p; | 380 | void __iomem *reg_p; |
348 | struct phy *phy; | 381 | struct phy *phy; |
349 | int ret = 0; | 382 | int ret; |
350 | 383 | ||
351 | ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie), | 384 | ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie), |
352 | GFP_KERNEL); | 385 | GFP_KERNEL); |
@@ -376,6 +409,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev) | |||
376 | devm_release_mem_region(dev, res->start, resource_size(res)); | 409 | devm_release_mem_region(dev, res->start, resource_size(res)); |
377 | 410 | ||
378 | pp->dev = dev; | 411 | pp->dev = dev; |
412 | ks_pcie->np = dev->of_node; | ||
379 | platform_set_drvdata(pdev, ks_pcie); | 413 | platform_set_drvdata(pdev, ks_pcie); |
380 | ks_pcie->clk = devm_clk_get(dev, "pcie"); | 414 | ks_pcie->clk = devm_clk_get(dev, "pcie"); |
381 | if (IS_ERR(ks_pcie->clk)) { | 415 | if (IS_ERR(ks_pcie->clk)) { |
diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h index f0944e8c4b02..a5b0cb2ba4d7 100644 --- a/drivers/pci/host/pci-keystone.h +++ b/drivers/pci/host/pci-keystone.h | |||
@@ -29,6 +29,9 @@ struct keystone_pcie { | |||
29 | int msi_host_irqs[MAX_MSI_HOST_IRQS]; | 29 | int msi_host_irqs[MAX_MSI_HOST_IRQS]; |
30 | struct device_node *msi_intc_np; | 30 | struct device_node *msi_intc_np; |
31 | struct irq_domain *legacy_irq_domain; | 31 | struct irq_domain *legacy_irq_domain; |
32 | struct device_node *np; | ||
33 | |||
34 | int error_irq; | ||
32 | 35 | ||
33 | /* Application register space */ | 36 | /* Application register space */ |
34 | void __iomem *va_app_base; | 37 | void __iomem *va_app_base; |
@@ -42,6 +45,9 @@ phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp); | |||
42 | /* Keystone specific PCI controller APIs */ | 45 | /* Keystone specific PCI controller APIs */ |
43 | void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie); | 46 | void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie); |
44 | void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset); | 47 | void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset); |
48 | void ks_dw_pcie_enable_error_irq(void __iomem *reg_base); | ||
49 | irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev, | ||
50 | void __iomem *reg_base); | ||
45 | int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, | 51 | int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, |
46 | struct device_node *msi_intc_np); | 52 | struct device_node *msi_intc_np); |
47 | int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, | 53 | int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, |
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index 53b79c5f0559..6b451df6502c 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c | |||
@@ -1003,6 +1003,7 @@ static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie) | |||
1003 | pcie->msi->dev = &pcie->pdev->dev; | 1003 | pcie->msi->dev = &pcie->pdev->dev; |
1004 | } | 1004 | } |
1005 | 1005 | ||
1006 | #ifdef CONFIG_PM_SLEEP | ||
1006 | static int mvebu_pcie_suspend(struct device *dev) | 1007 | static int mvebu_pcie_suspend(struct device *dev) |
1007 | { | 1008 | { |
1008 | struct mvebu_pcie *pcie; | 1009 | struct mvebu_pcie *pcie; |
@@ -1031,6 +1032,7 @@ static int mvebu_pcie_resume(struct device *dev) | |||
1031 | 1032 | ||
1032 | return 0; | 1033 | return 0; |
1033 | } | 1034 | } |
1035 | #endif | ||
1034 | 1036 | ||
1035 | static void mvebu_pcie_port_clk_put(void *data) | 1037 | static void mvebu_pcie_port_clk_put(void *data) |
1036 | { | 1038 | { |
@@ -1298,9 +1300,8 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = { | |||
1298 | }; | 1300 | }; |
1299 | MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table); | 1301 | MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table); |
1300 | 1302 | ||
1301 | static struct dev_pm_ops mvebu_pcie_pm_ops = { | 1303 | static const struct dev_pm_ops mvebu_pcie_pm_ops = { |
1302 | .suspend_noirq = mvebu_pcie_suspend, | 1304 | SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume) |
1303 | .resume_noirq = mvebu_pcie_resume, | ||
1304 | }; | 1305 | }; |
1305 | 1306 | ||
1306 | static struct platform_driver mvebu_pcie_driver = { | 1307 | static struct platform_driver mvebu_pcie_driver = { |
diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c index cabb92a514ac..d4a779761472 100644 --- a/drivers/pci/host/pci-thunder-pem.c +++ b/drivers/pci/host/pci-thunder-pem.c | |||
@@ -153,11 +153,11 @@ static int thunder_pem_config_read(struct pci_bus *bus, unsigned int devfn, | |||
153 | * reserved bits, this makes the code simpler and is OK as the bits | 153 | * reserved bits, this makes the code simpler and is OK as the bits |
154 | * are not affected by writing zeros to them. | 154 | * are not affected by writing zeros to them. |
155 | */ | 155 | */ |
156 | static u32 thunder_pem_bridge_w1c_bits(int where) | 156 | static u32 thunder_pem_bridge_w1c_bits(u64 where_aligned) |
157 | { | 157 | { |
158 | u32 w1c_bits = 0; | 158 | u32 w1c_bits = 0; |
159 | 159 | ||
160 | switch (where & ~3) { | 160 | switch (where_aligned) { |
161 | case 0x04: /* Command/Status */ | 161 | case 0x04: /* Command/Status */ |
162 | case 0x1c: /* Base and I/O Limit/Secondary Status */ | 162 | case 0x1c: /* Base and I/O Limit/Secondary Status */ |
163 | w1c_bits = 0xff000000; | 163 | w1c_bits = 0xff000000; |
@@ -184,12 +184,34 @@ static u32 thunder_pem_bridge_w1c_bits(int where) | |||
184 | return w1c_bits; | 184 | return w1c_bits; |
185 | } | 185 | } |
186 | 186 | ||
187 | /* Some bits must be written to one so they appear to be read-only. */ | ||
188 | static u32 thunder_pem_bridge_w1_bits(u64 where_aligned) | ||
189 | { | ||
190 | u32 w1_bits; | ||
191 | |||
192 | switch (where_aligned) { | ||
193 | case 0x1c: /* I/O Base / I/O Limit, Secondary Status */ | ||
194 | /* Force 32-bit I/O addressing. */ | ||
195 | w1_bits = 0x0101; | ||
196 | break; | ||
197 | case 0x24: /* Prefetchable Memory Base / Prefetchable Memory Limit */ | ||
198 | /* Force 64-bit addressing */ | ||
199 | w1_bits = 0x00010001; | ||
200 | break; | ||
201 | default: | ||
202 | w1_bits = 0; | ||
203 | break; | ||
204 | } | ||
205 | return w1_bits; | ||
206 | } | ||
207 | |||
187 | static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn, | 208 | static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn, |
188 | int where, int size, u32 val) | 209 | int where, int size, u32 val) |
189 | { | 210 | { |
190 | struct gen_pci *pci = bus->sysdata; | 211 | struct gen_pci *pci = bus->sysdata; |
191 | struct thunder_pem_pci *pem_pci; | 212 | struct thunder_pem_pci *pem_pci; |
192 | u64 write_val, read_val; | 213 | u64 write_val, read_val; |
214 | u64 where_aligned = where & ~3ull; | ||
193 | u32 mask = 0; | 215 | u32 mask = 0; |
194 | 216 | ||
195 | pem_pci = container_of(pci, struct thunder_pem_pci, gen_pci); | 217 | pem_pci = container_of(pci, struct thunder_pem_pci, gen_pci); |
@@ -205,8 +227,7 @@ static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn, | |||
205 | */ | 227 | */ |
206 | switch (size) { | 228 | switch (size) { |
207 | case 1: | 229 | case 1: |
208 | read_val = where & ~3ull; | 230 | writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD); |
209 | writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD); | ||
210 | read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); | 231 | read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); |
211 | read_val >>= 32; | 232 | read_val >>= 32; |
212 | mask = ~(0xff << (8 * (where & 3))); | 233 | mask = ~(0xff << (8 * (where & 3))); |
@@ -215,8 +236,7 @@ static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn, | |||
215 | val |= (u32)read_val; | 236 | val |= (u32)read_val; |
216 | break; | 237 | break; |
217 | case 2: | 238 | case 2: |
218 | read_val = where & ~3ull; | 239 | writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD); |
219 | writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD); | ||
220 | read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); | 240 | read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); |
221 | read_val >>= 32; | 241 | read_val >>= 32; |
222 | mask = ~(0xffff << (8 * (where & 3))); | 242 | mask = ~(0xffff << (8 * (where & 3))); |
@@ -244,11 +264,17 @@ static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn, | |||
244 | } | 264 | } |
245 | 265 | ||
246 | /* | 266 | /* |
267 | * Some bits must be read-only with value of one. Since the | ||
268 | * access method allows these to be cleared if a zero is | ||
269 | * written, force them to one before writing. | ||
270 | */ | ||
271 | val |= thunder_pem_bridge_w1_bits(where_aligned); | ||
272 | |||
273 | /* | ||
247 | * Low order bits are the config address, the high order 32 | 274 | * Low order bits are the config address, the high order 32 |
248 | * bits are the data to be written. | 275 | * bits are the data to be written. |
249 | */ | 276 | */ |
250 | write_val = where & ~3ull; | 277 | write_val = (((u64)val) << 32) | where_aligned; |
251 | write_val |= (((u64)val) << 32); | ||
252 | writeq(write_val, pem_pci->pem_reg_base + PEM_CFG_WR); | 278 | writeq(write_val, pem_pci->pem_reg_base + PEM_CFG_WR); |
253 | return PCIBIOS_SUCCESSFUL; | 279 | return PCIBIOS_SUCCESSFUL; |
254 | } | 280 | } |
diff --git a/drivers/pci/host/pcie-armada8k.c b/drivers/pci/host/pcie-armada8k.c new file mode 100644 index 000000000000..55723567b5d4 --- /dev/null +++ b/drivers/pci/host/pcie-armada8k.c | |||
@@ -0,0 +1,262 @@ | |||
1 | /* | ||
2 | * PCIe host controller driver for Marvell Armada-8K SoCs | ||
3 | * | ||
4 | * Armada-8K PCIe Glue Layer Source Code | ||
5 | * | ||
6 | * Copyright (C) 2016 Marvell Technology Group Ltd. | ||
7 | * | ||
8 | * This file is licensed under the terms of the GNU General Public | ||
9 | * License version 2. This program is licensed "as is" without any | ||
10 | * warranty of any kind, whether express or implied. | ||
11 | */ | ||
12 | |||
13 | #include <linux/clk.h> | ||
14 | #include <linux/delay.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/of.h> | ||
19 | #include <linux/pci.h> | ||
20 | #include <linux/phy/phy.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/resource.h> | ||
23 | #include <linux/of_pci.h> | ||
24 | #include <linux/of_irq.h> | ||
25 | |||
26 | #include "pcie-designware.h" | ||
27 | |||
28 | struct armada8k_pcie { | ||
29 | void __iomem *base; | ||
30 | struct clk *clk; | ||
31 | struct pcie_port pp; | ||
32 | }; | ||
33 | |||
34 | #define PCIE_VENDOR_REGS_OFFSET 0x8000 | ||
35 | |||
36 | #define PCIE_GLOBAL_CONTROL_REG 0x0 | ||
37 | #define PCIE_APP_LTSSM_EN BIT(2) | ||
38 | #define PCIE_DEVICE_TYPE_SHIFT 4 | ||
39 | #define PCIE_DEVICE_TYPE_MASK 0xF | ||
40 | #define PCIE_DEVICE_TYPE_RC 0x4 /* Root complex */ | ||
41 | |||
42 | #define PCIE_GLOBAL_STATUS_REG 0x8 | ||
43 | #define PCIE_GLB_STS_RDLH_LINK_UP BIT(1) | ||
44 | #define PCIE_GLB_STS_PHY_LINK_UP BIT(9) | ||
45 | |||
46 | #define PCIE_GLOBAL_INT_CAUSE1_REG 0x1C | ||
47 | #define PCIE_GLOBAL_INT_MASK1_REG 0x20 | ||
48 | #define PCIE_INT_A_ASSERT_MASK BIT(9) | ||
49 | #define PCIE_INT_B_ASSERT_MASK BIT(10) | ||
50 | #define PCIE_INT_C_ASSERT_MASK BIT(11) | ||
51 | #define PCIE_INT_D_ASSERT_MASK BIT(12) | ||
52 | |||
53 | #define PCIE_ARCACHE_TRC_REG 0x50 | ||
54 | #define PCIE_AWCACHE_TRC_REG 0x54 | ||
55 | #define PCIE_ARUSER_REG 0x5C | ||
56 | #define PCIE_AWUSER_REG 0x60 | ||
57 | /* | ||
58 | * AR/AW Cache defauls: Normal memory, Write-Back, Read / Write | ||
59 | * allocate | ||
60 | */ | ||
61 | #define ARCACHE_DEFAULT_VALUE 0x3511 | ||
62 | #define AWCACHE_DEFAULT_VALUE 0x5311 | ||
63 | |||
64 | #define DOMAIN_OUTER_SHAREABLE 0x2 | ||
65 | #define AX_USER_DOMAIN_MASK 0x3 | ||
66 | #define AX_USER_DOMAIN_SHIFT 4 | ||
67 | |||
68 | #define to_armada8k_pcie(x) container_of(x, struct armada8k_pcie, pp) | ||
69 | |||
70 | static int armada8k_pcie_link_up(struct pcie_port *pp) | ||
71 | { | ||
72 | struct armada8k_pcie *pcie = to_armada8k_pcie(pp); | ||
73 | u32 reg; | ||
74 | u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP; | ||
75 | |||
76 | reg = readl(pcie->base + PCIE_GLOBAL_STATUS_REG); | ||
77 | |||
78 | if ((reg & mask) == mask) | ||
79 | return 1; | ||
80 | |||
81 | dev_dbg(pp->dev, "No link detected (Global-Status: 0x%08x).\n", reg); | ||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | static void armada8k_pcie_establish_link(struct pcie_port *pp) | ||
86 | { | ||
87 | struct armada8k_pcie *pcie = to_armada8k_pcie(pp); | ||
88 | void __iomem *base = pcie->base; | ||
89 | u32 reg; | ||
90 | |||
91 | if (!dw_pcie_link_up(pp)) { | ||
92 | /* Disable LTSSM state machine to enable configuration */ | ||
93 | reg = readl(base + PCIE_GLOBAL_CONTROL_REG); | ||
94 | reg &= ~(PCIE_APP_LTSSM_EN); | ||
95 | writel(reg, base + PCIE_GLOBAL_CONTROL_REG); | ||
96 | } | ||
97 | |||
98 | /* Set the device to root complex mode */ | ||
99 | reg = readl(base + PCIE_GLOBAL_CONTROL_REG); | ||
100 | reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT); | ||
101 | reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT; | ||
102 | writel(reg, base + PCIE_GLOBAL_CONTROL_REG); | ||
103 | |||
104 | /* Set the PCIe master AxCache attributes */ | ||
105 | writel(ARCACHE_DEFAULT_VALUE, base + PCIE_ARCACHE_TRC_REG); | ||
106 | writel(AWCACHE_DEFAULT_VALUE, base + PCIE_AWCACHE_TRC_REG); | ||
107 | |||
108 | /* Set the PCIe master AxDomain attributes */ | ||
109 | reg = readl(base + PCIE_ARUSER_REG); | ||
110 | reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT); | ||
111 | reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT; | ||
112 | writel(reg, base + PCIE_ARUSER_REG); | ||
113 | |||
114 | reg = readl(base + PCIE_AWUSER_REG); | ||
115 | reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT); | ||
116 | reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT; | ||
117 | writel(reg, base + PCIE_AWUSER_REG); | ||
118 | |||
119 | /* Enable INT A-D interrupts */ | ||
120 | reg = readl(base + PCIE_GLOBAL_INT_MASK1_REG); | ||
121 | reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK | | ||
122 | PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK; | ||
123 | writel(reg, base + PCIE_GLOBAL_INT_MASK1_REG); | ||
124 | |||
125 | if (!dw_pcie_link_up(pp)) { | ||
126 | /* Configuration done. Start LTSSM */ | ||
127 | reg = readl(base + PCIE_GLOBAL_CONTROL_REG); | ||
128 | reg |= PCIE_APP_LTSSM_EN; | ||
129 | writel(reg, base + PCIE_GLOBAL_CONTROL_REG); | ||
130 | } | ||
131 | |||
132 | /* Wait until the link becomes active again */ | ||
133 | if (dw_pcie_wait_for_link(pp)) | ||
134 | dev_err(pp->dev, "Link not up after reconfiguration\n"); | ||
135 | } | ||
136 | |||
137 | static void armada8k_pcie_host_init(struct pcie_port *pp) | ||
138 | { | ||
139 | dw_pcie_setup_rc(pp); | ||
140 | armada8k_pcie_establish_link(pp); | ||
141 | } | ||
142 | |||
143 | static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg) | ||
144 | { | ||
145 | struct pcie_port *pp = arg; | ||
146 | struct armada8k_pcie *pcie = to_armada8k_pcie(pp); | ||
147 | void __iomem *base = pcie->base; | ||
148 | u32 val; | ||
149 | |||
150 | /* | ||
151 | * Interrupts are directly handled by the device driver of the | ||
152 | * PCI device. However, they are also latched into the PCIe | ||
153 | * controller, so we simply discard them. | ||
154 | */ | ||
155 | val = readl(base + PCIE_GLOBAL_INT_CAUSE1_REG); | ||
156 | writel(val, base + PCIE_GLOBAL_INT_CAUSE1_REG); | ||
157 | |||
158 | return IRQ_HANDLED; | ||
159 | } | ||
160 | |||
161 | static struct pcie_host_ops armada8k_pcie_host_ops = { | ||
162 | .link_up = armada8k_pcie_link_up, | ||
163 | .host_init = armada8k_pcie_host_init, | ||
164 | }; | ||
165 | |||
166 | static int armada8k_add_pcie_port(struct pcie_port *pp, | ||
167 | struct platform_device *pdev) | ||
168 | { | ||
169 | struct device *dev = &pdev->dev; | ||
170 | int ret; | ||
171 | |||
172 | pp->root_bus_nr = -1; | ||
173 | pp->ops = &armada8k_pcie_host_ops; | ||
174 | |||
175 | pp->irq = platform_get_irq(pdev, 0); | ||
176 | if (!pp->irq) { | ||
177 | dev_err(dev, "failed to get irq for port\n"); | ||
178 | return -ENODEV; | ||
179 | } | ||
180 | |||
181 | ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler, | ||
182 | IRQF_SHARED, "armada8k-pcie", pp); | ||
183 | if (ret) { | ||
184 | dev_err(dev, "failed to request irq %d\n", pp->irq); | ||
185 | return ret; | ||
186 | } | ||
187 | |||
188 | ret = dw_pcie_host_init(pp); | ||
189 | if (ret) { | ||
190 | dev_err(dev, "failed to initialize host: %d\n", ret); | ||
191 | return ret; | ||
192 | } | ||
193 | |||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | static int armada8k_pcie_probe(struct platform_device *pdev) | ||
198 | { | ||
199 | struct armada8k_pcie *pcie; | ||
200 | struct pcie_port *pp; | ||
201 | struct device *dev = &pdev->dev; | ||
202 | struct resource *base; | ||
203 | int ret; | ||
204 | |||
205 | pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); | ||
206 | if (!pcie) | ||
207 | return -ENOMEM; | ||
208 | |||
209 | pcie->clk = devm_clk_get(dev, NULL); | ||
210 | if (IS_ERR(pcie->clk)) | ||
211 | return PTR_ERR(pcie->clk); | ||
212 | |||
213 | clk_prepare_enable(pcie->clk); | ||
214 | |||
215 | pp = &pcie->pp; | ||
216 | pp->dev = dev; | ||
217 | platform_set_drvdata(pdev, pcie); | ||
218 | |||
219 | /* Get the dw-pcie unit configuration/control registers base. */ | ||
220 | base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); | ||
221 | pp->dbi_base = devm_ioremap_resource(dev, base); | ||
222 | if (IS_ERR(pp->dbi_base)) { | ||
223 | dev_err(dev, "couldn't remap regs base %p\n", base); | ||
224 | ret = PTR_ERR(pp->dbi_base); | ||
225 | goto fail; | ||
226 | } | ||
227 | |||
228 | pcie->base = pp->dbi_base + PCIE_VENDOR_REGS_OFFSET; | ||
229 | |||
230 | ret = armada8k_add_pcie_port(pp, pdev); | ||
231 | if (ret) | ||
232 | goto fail; | ||
233 | |||
234 | return 0; | ||
235 | |||
236 | fail: | ||
237 | if (!IS_ERR(pcie->clk)) | ||
238 | clk_disable_unprepare(pcie->clk); | ||
239 | |||
240 | return ret; | ||
241 | } | ||
242 | |||
243 | static const struct of_device_id armada8k_pcie_of_match[] = { | ||
244 | { .compatible = "marvell,armada8k-pcie", }, | ||
245 | {}, | ||
246 | }; | ||
247 | MODULE_DEVICE_TABLE(of, armada8k_pcie_of_match); | ||
248 | |||
249 | static struct platform_driver armada8k_pcie_driver = { | ||
250 | .probe = armada8k_pcie_probe, | ||
251 | .driver = { | ||
252 | .name = "armada8k-pcie", | ||
253 | .of_match_table = of_match_ptr(armada8k_pcie_of_match), | ||
254 | }, | ||
255 | }; | ||
256 | |||
257 | module_platform_driver(armada8k_pcie_driver); | ||
258 | |||
259 | MODULE_DESCRIPTION("Armada 8k PCIe host controller driver"); | ||
260 | MODULE_AUTHOR("Yehuda Yitshak <yehuday@marvell.com>"); | ||
261 | MODULE_AUTHOR("Shadi Ammouri <shadi@marvell.com>"); | ||
262 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c index 5139e6443bbd..3479d30e2be8 100644 --- a/drivers/pci/host/pcie-xilinx-nwl.c +++ b/drivers/pci/host/pcie-xilinx-nwl.c | |||
@@ -819,7 +819,7 @@ static int nwl_pcie_probe(struct platform_device *pdev) | |||
819 | 819 | ||
820 | err = nwl_pcie_bridge_init(pcie); | 820 | err = nwl_pcie_bridge_init(pcie); |
821 | if (err) { | 821 | if (err) { |
822 | dev_err(pcie->dev, "HW Initalization failed\n"); | 822 | dev_err(pcie->dev, "HW Initialization failed\n"); |
823 | return err; | 823 | return err; |
824 | } | 824 | } |
825 | 825 | ||
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c index 2f6d3a1c1726..f6221d739f59 100644 --- a/drivers/pci/hotplug/acpiphp_ibm.c +++ b/drivers/pci/hotplug/acpiphp_ibm.c | |||
@@ -138,6 +138,8 @@ static union apci_descriptor *ibm_slot_from_id(int id) | |||
138 | char *table; | 138 | char *table; |
139 | 139 | ||
140 | size = ibm_get_table_from_acpi(&table); | 140 | size = ibm_get_table_from_acpi(&table); |
141 | if (size < 0) | ||
142 | return NULL; | ||
141 | des = (union apci_descriptor *)table; | 143 | des = (union apci_descriptor *)table; |
142 | if (memcmp(des->header.sig, "aPCI", 4) != 0) | 144 | if (memcmp(des->header.sig, "aPCI", 4) != 0) |
143 | goto ibm_slot_done; | 145 | goto ibm_slot_done; |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 25e0327d4429..7542001de9b0 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -2389,7 +2389,7 @@ out: | |||
2389 | return offset + ent_size; | 2389 | return offset + ent_size; |
2390 | } | 2390 | } |
2391 | 2391 | ||
2392 | /* Enhanced Allocation Initalization */ | 2392 | /* Enhanced Allocation Initialization */ |
2393 | void pci_ea_init(struct pci_dev *dev) | 2393 | void pci_ea_init(struct pci_dev *dev) |
2394 | { | 2394 | { |
2395 | int ea; | 2395 | int ea; |
@@ -2547,7 +2547,7 @@ void pci_request_acs(void) | |||
2547 | * pci_std_enable_acs - enable ACS on devices using standard ACS capabilites | 2547 | * pci_std_enable_acs - enable ACS on devices using standard ACS capabilites |
2548 | * @dev: the PCI device | 2548 | * @dev: the PCI device |
2549 | */ | 2549 | */ |
2550 | static int pci_std_enable_acs(struct pci_dev *dev) | 2550 | static void pci_std_enable_acs(struct pci_dev *dev) |
2551 | { | 2551 | { |
2552 | int pos; | 2552 | int pos; |
2553 | u16 cap; | 2553 | u16 cap; |
@@ -2555,7 +2555,7 @@ static int pci_std_enable_acs(struct pci_dev *dev) | |||
2555 | 2555 | ||
2556 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); | 2556 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); |
2557 | if (!pos) | 2557 | if (!pos) |
2558 | return -ENODEV; | 2558 | return; |
2559 | 2559 | ||
2560 | pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap); | 2560 | pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap); |
2561 | pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); | 2561 | pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); |
@@ -2573,8 +2573,6 @@ static int pci_std_enable_acs(struct pci_dev *dev) | |||
2573 | ctrl |= (cap & PCI_ACS_UF); | 2573 | ctrl |= (cap & PCI_ACS_UF); |
2574 | 2574 | ||
2575 | pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); | 2575 | pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); |
2576 | |||
2577 | return 0; | ||
2578 | } | 2576 | } |
2579 | 2577 | ||
2580 | /** | 2578 | /** |
@@ -2586,10 +2584,10 @@ void pci_enable_acs(struct pci_dev *dev) | |||
2586 | if (!pci_acs_enable) | 2584 | if (!pci_acs_enable) |
2587 | return; | 2585 | return; |
2588 | 2586 | ||
2589 | if (!pci_std_enable_acs(dev)) | 2587 | if (!pci_dev_specific_enable_acs(dev)) |
2590 | return; | 2588 | return; |
2591 | 2589 | ||
2592 | pci_dev_specific_enable_acs(dev); | 2590 | pci_std_enable_acs(dev); |
2593 | } | 2591 | } |
2594 | 2592 | ||
2595 | static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags) | 2593 | static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags) |
@@ -4578,6 +4576,37 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode, | |||
4578 | return 0; | 4576 | return 0; |
4579 | } | 4577 | } |
4580 | 4578 | ||
4579 | /** | ||
4580 | * pci_add_dma_alias - Add a DMA devfn alias for a device | ||
4581 | * @dev: the PCI device for which alias is added | ||
4582 | * @devfn: alias slot and function | ||
4583 | * | ||
4584 | * This helper encodes 8-bit devfn as bit number in dma_alias_mask. | ||
4585 | * It should be called early, preferably as PCI fixup header quirk. | ||
4586 | */ | ||
4587 | void pci_add_dma_alias(struct pci_dev *dev, u8 devfn) | ||
4588 | { | ||
4589 | if (!dev->dma_alias_mask) | ||
4590 | dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX), | ||
4591 | sizeof(long), GFP_KERNEL); | ||
4592 | if (!dev->dma_alias_mask) { | ||
4593 | dev_warn(&dev->dev, "Unable to allocate DMA alias mask\n"); | ||
4594 | return; | ||
4595 | } | ||
4596 | |||
4597 | set_bit(devfn, dev->dma_alias_mask); | ||
4598 | dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n", | ||
4599 | PCI_SLOT(devfn), PCI_FUNC(devfn)); | ||
4600 | } | ||
4601 | |||
4602 | bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2) | ||
4603 | { | ||
4604 | return (dev1->dma_alias_mask && | ||
4605 | test_bit(dev2->devfn, dev1->dma_alias_mask)) || | ||
4606 | (dev2->dma_alias_mask && | ||
4607 | test_bit(dev1->devfn, dev2->dma_alias_mask)); | ||
4608 | } | ||
4609 | |||
4581 | bool pci_device_is_present(struct pci_dev *pdev) | 4610 | bool pci_device_is_present(struct pci_dev *pdev) |
4582 | { | 4611 | { |
4583 | u32 v; | 4612 | u32 v; |
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index d525548404d6..463b60975194 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h | |||
@@ -67,17 +67,14 @@ static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {} | |||
67 | #endif /* !CONFIG_PCIE_PME */ | 67 | #endif /* !CONFIG_PCIE_PME */ |
68 | 68 | ||
69 | #ifdef CONFIG_ACPI | 69 | #ifdef CONFIG_ACPI |
70 | int pcie_port_acpi_setup(struct pci_dev *port, int *mask); | 70 | void pcie_port_acpi_setup(struct pci_dev *port, int *mask); |
71 | 71 | ||
72 | static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask) | 72 | static inline void pcie_port_platform_notify(struct pci_dev *port, int *mask) |
73 | { | 73 | { |
74 | return pcie_port_acpi_setup(port, mask); | 74 | pcie_port_acpi_setup(port, mask); |
75 | } | 75 | } |
76 | #else /* !CONFIG_ACPI */ | 76 | #else /* !CONFIG_ACPI */ |
77 | static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask) | 77 | static inline void pcie_port_platform_notify(struct pci_dev *port, int *mask){} |
78 | { | ||
79 | return 0; | ||
80 | } | ||
81 | #endif /* !CONFIG_ACPI */ | 78 | #endif /* !CONFIG_ACPI */ |
82 | 79 | ||
83 | #endif /* _PORTDRV_H_ */ | 80 | #endif /* _PORTDRV_H_ */ |
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c index b4d2894ee3fc..9f4ed7172fda 100644 --- a/drivers/pci/pcie/portdrv_acpi.c +++ b/drivers/pci/pcie/portdrv_acpi.c | |||
@@ -32,22 +32,22 @@ | |||
32 | * NOTE: It turns out that we cannot do that for individual port services | 32 | * NOTE: It turns out that we cannot do that for individual port services |
33 | * separately, because that would make some systems work incorrectly. | 33 | * separately, because that would make some systems work incorrectly. |
34 | */ | 34 | */ |
35 | int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask) | 35 | void pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask) |
36 | { | 36 | { |
37 | struct acpi_pci_root *root; | 37 | struct acpi_pci_root *root; |
38 | acpi_handle handle; | 38 | acpi_handle handle; |
39 | u32 flags; | 39 | u32 flags; |
40 | 40 | ||
41 | if (acpi_pci_disabled) | 41 | if (acpi_pci_disabled) |
42 | return 0; | 42 | return; |
43 | 43 | ||
44 | handle = acpi_find_root_bridge_handle(port); | 44 | handle = acpi_find_root_bridge_handle(port); |
45 | if (!handle) | 45 | if (!handle) |
46 | return -EINVAL; | 46 | return; |
47 | 47 | ||
48 | root = acpi_pci_find_root(handle); | 48 | root = acpi_pci_find_root(handle); |
49 | if (!root) | 49 | if (!root) |
50 | return -ENODEV; | 50 | return; |
51 | 51 | ||
52 | flags = root->osc_control_set; | 52 | flags = root->osc_control_set; |
53 | 53 | ||
@@ -58,6 +58,4 @@ int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask) | |||
58 | *srv_mask |= PCIE_PORT_SERVICE_PME; | 58 | *srv_mask |= PCIE_PORT_SERVICE_PME; |
59 | if (flags & OSC_PCI_EXPRESS_AER_CONTROL) | 59 | if (flags & OSC_PCI_EXPRESS_AER_CONTROL) |
60 | *srv_mask |= PCIE_PORT_SERVICE_AER; | 60 | *srv_mask |= PCIE_PORT_SERVICE_AER; |
61 | |||
62 | return 0; | ||
63 | } | 61 | } |
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 88122dc2e1b1..de7a85bc8016 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c | |||
@@ -256,7 +256,6 @@ static int get_port_device_capability(struct pci_dev *dev) | |||
256 | int services = 0; | 256 | int services = 0; |
257 | u32 reg32; | 257 | u32 reg32; |
258 | int cap_mask = 0; | 258 | int cap_mask = 0; |
259 | int err; | ||
260 | 259 | ||
261 | if (pcie_ports_disabled) | 260 | if (pcie_ports_disabled) |
262 | return 0; | 261 | return 0; |
@@ -266,11 +265,8 @@ static int get_port_device_capability(struct pci_dev *dev) | |||
266 | if (pci_aer_available()) | 265 | if (pci_aer_available()) |
267 | cap_mask |= PCIE_PORT_SERVICE_AER; | 266 | cap_mask |= PCIE_PORT_SERVICE_AER; |
268 | 267 | ||
269 | if (pcie_ports_auto) { | 268 | if (pcie_ports_auto) |
270 | err = pcie_port_platform_notify(dev, &cap_mask); | 269 | pcie_port_platform_notify(dev, &cap_mask); |
271 | if (err) | ||
272 | return 0; | ||
273 | } | ||
274 | 270 | ||
275 | /* Hot-Plug Capable */ | 271 | /* Hot-Plug Capable */ |
276 | if ((cap_mask & PCIE_PORT_SERVICE_HP) && | 272 | if ((cap_mask & PCIE_PORT_SERVICE_HP) && |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 8004f67c57ec..ae7daeb83e21 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -1537,6 +1537,7 @@ static void pci_release_dev(struct device *dev) | |||
1537 | pcibios_release_device(pci_dev); | 1537 | pcibios_release_device(pci_dev); |
1538 | pci_bus_put(pci_dev->bus); | 1538 | pci_bus_put(pci_dev->bus); |
1539 | kfree(pci_dev->driver_override); | 1539 | kfree(pci_dev->driver_override); |
1540 | kfree(pci_dev->dma_alias_mask); | ||
1540 | kfree(pci_dev); | 1541 | kfree(pci_dev); |
1541 | } | 1542 | } |
1542 | 1543 | ||
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 8e678027b900..ee72ebe18f4b 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -3150,6 +3150,39 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169, | |||
3150 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID, | 3150 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID, |
3151 | quirk_broken_intx_masking); | 3151 | quirk_broken_intx_masking); |
3152 | 3152 | ||
3153 | /* | ||
3154 | * Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking, | ||
3155 | * DisINTx can be set but the interrupt status bit is non-functional. | ||
3156 | */ | ||
3157 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1572, | ||
3158 | quirk_broken_intx_masking); | ||
3159 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1574, | ||
3160 | quirk_broken_intx_masking); | ||
3161 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1580, | ||
3162 | quirk_broken_intx_masking); | ||
3163 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1581, | ||
3164 | quirk_broken_intx_masking); | ||
3165 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1583, | ||
3166 | quirk_broken_intx_masking); | ||
3167 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1584, | ||
3168 | quirk_broken_intx_masking); | ||
3169 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1585, | ||
3170 | quirk_broken_intx_masking); | ||
3171 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1586, | ||
3172 | quirk_broken_intx_masking); | ||
3173 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1587, | ||
3174 | quirk_broken_intx_masking); | ||
3175 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1588, | ||
3176 | quirk_broken_intx_masking); | ||
3177 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1589, | ||
3178 | quirk_broken_intx_masking); | ||
3179 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d0, | ||
3180 | quirk_broken_intx_masking); | ||
3181 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d1, | ||
3182 | quirk_broken_intx_masking); | ||
3183 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d2, | ||
3184 | quirk_broken_intx_masking); | ||
3185 | |||
3153 | static void quirk_no_bus_reset(struct pci_dev *dev) | 3186 | static void quirk_no_bus_reset(struct pci_dev *dev) |
3154 | { | 3187 | { |
3155 | dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; | 3188 | dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; |
@@ -3185,6 +3218,29 @@ static void quirk_no_pm_reset(struct pci_dev *dev) | |||
3185 | DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID, | 3218 | DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID, |
3186 | PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset); | 3219 | PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset); |
3187 | 3220 | ||
3221 | /* | ||
3222 | * Thunderbolt controllers with broken MSI hotplug signaling: | ||
3223 | * Entire 1st generation (Light Ridge, Eagle Ridge, Light Peak) and part | ||
3224 | * of the 2nd generation (Cactus Ridge 4C up to revision 1, Port Ridge). | ||
3225 | */ | ||
3226 | static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev) | ||
3227 | { | ||
3228 | if (pdev->is_hotplug_bridge && | ||
3229 | (pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C || | ||
3230 | pdev->revision <= 1)) | ||
3231 | pdev->no_msi = 1; | ||
3232 | } | ||
3233 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_RIDGE, | ||
3234 | quirk_thunderbolt_hotplug_msi); | ||
3235 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EAGLE_RIDGE, | ||
3236 | quirk_thunderbolt_hotplug_msi); | ||
3237 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_PEAK, | ||
3238 | quirk_thunderbolt_hotplug_msi); | ||
3239 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, | ||
3240 | quirk_thunderbolt_hotplug_msi); | ||
3241 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE, | ||
3242 | quirk_thunderbolt_hotplug_msi); | ||
3243 | |||
3188 | #ifdef CONFIG_ACPI | 3244 | #ifdef CONFIG_ACPI |
3189 | /* | 3245 | /* |
3190 | * Apple: Shutdown Cactus Ridge Thunderbolt controller. | 3246 | * Apple: Shutdown Cactus Ridge Thunderbolt controller. |
@@ -3232,7 +3288,8 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev) | |||
3232 | acpi_execute_simple_method(SXIO, NULL, 0); | 3288 | acpi_execute_simple_method(SXIO, NULL, 0); |
3233 | acpi_execute_simple_method(SXLV, NULL, 0); | 3289 | acpi_execute_simple_method(SXLV, NULL, 0); |
3234 | } | 3290 | } |
3235 | DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL, 0x1547, | 3291 | DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL, |
3292 | PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, | ||
3236 | quirk_apple_poweroff_thunderbolt); | 3293 | quirk_apple_poweroff_thunderbolt); |
3237 | 3294 | ||
3238 | /* | 3295 | /* |
@@ -3266,9 +3323,11 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev) | |||
3266 | if (!nhi) | 3323 | if (!nhi) |
3267 | goto out; | 3324 | goto out; |
3268 | if (nhi->vendor != PCI_VENDOR_ID_INTEL | 3325 | if (nhi->vendor != PCI_VENDOR_ID_INTEL |
3269 | || (nhi->device != 0x1547 && nhi->device != 0x156c) | 3326 | || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE && |
3270 | || nhi->subsystem_vendor != 0x2222 | 3327 | nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C && |
3271 | || nhi->subsystem_device != 0x1111) | 3328 | nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI) |
3329 | || nhi->subsystem_vendor != 0x2222 | ||
3330 | || nhi->subsystem_device != 0x1111) | ||
3272 | goto out; | 3331 | goto out; |
3273 | dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n"); | 3332 | dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n"); |
3274 | device_pm_wait_for_dev(&dev->dev, &nhi->dev); | 3333 | device_pm_wait_for_dev(&dev->dev, &nhi->dev); |
@@ -3276,9 +3335,14 @@ out: | |||
3276 | pci_dev_put(nhi); | 3335 | pci_dev_put(nhi); |
3277 | pci_dev_put(sibling); | 3336 | pci_dev_put(sibling); |
3278 | } | 3337 | } |
3279 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x1547, | 3338 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, |
3339 | PCI_DEVICE_ID_INTEL_LIGHT_RIDGE, | ||
3280 | quirk_apple_wait_for_thunderbolt); | 3340 | quirk_apple_wait_for_thunderbolt); |
3281 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x156d, | 3341 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, |
3342 | PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, | ||
3343 | quirk_apple_wait_for_thunderbolt); | ||
3344 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, | ||
3345 | PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE, | ||
3282 | quirk_apple_wait_for_thunderbolt); | 3346 | quirk_apple_wait_for_thunderbolt); |
3283 | #endif | 3347 | #endif |
3284 | 3348 | ||
@@ -3610,10 +3674,8 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe) | |||
3610 | 3674 | ||
3611 | static void quirk_dma_func0_alias(struct pci_dev *dev) | 3675 | static void quirk_dma_func0_alias(struct pci_dev *dev) |
3612 | { | 3676 | { |
3613 | if (PCI_FUNC(dev->devfn) != 0) { | 3677 | if (PCI_FUNC(dev->devfn) != 0) |
3614 | dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0); | 3678 | pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); |
3615 | dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; | ||
3616 | } | ||
3617 | } | 3679 | } |
3618 | 3680 | ||
3619 | /* | 3681 | /* |
@@ -3626,10 +3688,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias); | |||
3626 | 3688 | ||
3627 | static void quirk_dma_func1_alias(struct pci_dev *dev) | 3689 | static void quirk_dma_func1_alias(struct pci_dev *dev) |
3628 | { | 3690 | { |
3629 | if (PCI_FUNC(dev->devfn) != 1) { | 3691 | if (PCI_FUNC(dev->devfn) != 1) |
3630 | dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 1); | 3692 | pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1)); |
3631 | dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; | ||
3632 | } | ||
3633 | } | 3693 | } |
3634 | 3694 | ||
3635 | /* | 3695 | /* |
@@ -3695,13 +3755,8 @@ static void quirk_fixed_dma_alias(struct pci_dev *dev) | |||
3695 | const struct pci_device_id *id; | 3755 | const struct pci_device_id *id; |
3696 | 3756 | ||
3697 | id = pci_match_id(fixed_dma_alias_tbl, dev); | 3757 | id = pci_match_id(fixed_dma_alias_tbl, dev); |
3698 | if (id) { | 3758 | if (id) |
3699 | dev->dma_alias_devfn = id->driver_data; | 3759 | pci_add_dma_alias(dev, id->driver_data); |
3700 | dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; | ||
3701 | dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n", | ||
3702 | PCI_SLOT(dev->dma_alias_devfn), | ||
3703 | PCI_FUNC(dev->dma_alias_devfn)); | ||
3704 | } | ||
3705 | } | 3760 | } |
3706 | 3761 | ||
3707 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias); | 3762 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias); |
@@ -3734,6 +3789,21 @@ DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias); | |||
3734 | DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias); | 3789 | DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias); |
3735 | 3790 | ||
3736 | /* | 3791 | /* |
3792 | * MIC x200 NTB forwards PCIe traffic using multiple alien RIDs. They have to | ||
3793 | * be added as aliases to the DMA device in order to allow buffer access | ||
3794 | * when IOMMU is enabled. Following devfns have to match RIT-LUT table | ||
3795 | * programmed in the EEPROM. | ||
3796 | */ | ||
3797 | static void quirk_mic_x200_dma_alias(struct pci_dev *pdev) | ||
3798 | { | ||
3799 | pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0)); | ||
3800 | pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0)); | ||
3801 | pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3)); | ||
3802 | } | ||
3803 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias); | ||
3804 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias); | ||
3805 | |||
3806 | /* | ||
3737 | * Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero) | 3807 | * Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero) |
3738 | * class code. Fix it. | 3808 | * class code. Fix it. |
3739 | */ | 3809 | */ |
@@ -3936,6 +4006,55 @@ static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags) | |||
3936 | return acs_flags & ~flags ? 0 : 1; | 4006 | return acs_flags & ~flags ? 0 : 1; |
3937 | } | 4007 | } |
3938 | 4008 | ||
4009 | /* | ||
4010 | * Sunrise Point PCH root ports implement ACS, but unfortunately as shown in | ||
4011 | * the datasheet (Intel 100 Series Chipset Family PCH Datasheet, Vol. 2, | ||
4012 | * 12.1.46, 12.1.47)[1] this chipset uses dwords for the ACS capability and | ||
4013 | * control registers whereas the PCIe spec packs them into words (Rev 3.0, | ||
4014 | * 7.16 ACS Extended Capability). The bit definitions are correct, but the | ||
4015 | * control register is at offset 8 instead of 6 and we should probably use | ||
4016 | * dword accesses to them. This applies to the following PCI Device IDs, as | ||
4017 | * found in volume 1 of the datasheet[2]: | ||
4018 | * | ||
4019 | * 0xa110-0xa11f Sunrise Point-H PCI Express Root Port #{0-16} | ||
4020 | * 0xa167-0xa16a Sunrise Point-H PCI Express Root Port #{17-20} | ||
4021 | * | ||
4022 | * N.B. This doesn't fix what lspci shows. | ||
4023 | * | ||
4024 | * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html | ||
4025 | * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html | ||
4026 | */ | ||
4027 | static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev) | ||
4028 | { | ||
4029 | return pci_is_pcie(dev) && | ||
4030 | pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT && | ||
4031 | ((dev->device & ~0xf) == 0xa110 || | ||
4032 | (dev->device >= 0xa167 && dev->device <= 0xa16a)); | ||
4033 | } | ||
4034 | |||
4035 | #define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4) | ||
4036 | |||
4037 | static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags) | ||
4038 | { | ||
4039 | int pos; | ||
4040 | u32 cap, ctrl; | ||
4041 | |||
4042 | if (!pci_quirk_intel_spt_pch_acs_match(dev)) | ||
4043 | return -ENOTTY; | ||
4044 | |||
4045 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); | ||
4046 | if (!pos) | ||
4047 | return -ENOTTY; | ||
4048 | |||
4049 | /* see pci_acs_flags_enabled() */ | ||
4050 | pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap); | ||
4051 | acs_flags &= (cap | PCI_ACS_EC); | ||
4052 | |||
4053 | pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl); | ||
4054 | |||
4055 | return acs_flags & ~ctrl ? 0 : 1; | ||
4056 | } | ||
4057 | |||
3939 | static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags) | 4058 | static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags) |
3940 | { | 4059 | { |
3941 | /* | 4060 | /* |
@@ -4024,6 +4143,7 @@ static const struct pci_dev_acs_enabled { | |||
4024 | { PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs }, | 4143 | { PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs }, |
4025 | /* Intel PCH root ports */ | 4144 | /* Intel PCH root ports */ |
4026 | { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs }, | 4145 | { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs }, |
4146 | { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs }, | ||
4027 | { 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */ | 4147 | { 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */ |
4028 | { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */ | 4148 | { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */ |
4029 | /* Cavium ThunderX */ | 4149 | /* Cavium ThunderX */ |
@@ -4159,16 +4279,44 @@ static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev) | |||
4159 | return 0; | 4279 | return 0; |
4160 | } | 4280 | } |
4161 | 4281 | ||
4282 | static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev) | ||
4283 | { | ||
4284 | int pos; | ||
4285 | u32 cap, ctrl; | ||
4286 | |||
4287 | if (!pci_quirk_intel_spt_pch_acs_match(dev)) | ||
4288 | return -ENOTTY; | ||
4289 | |||
4290 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); | ||
4291 | if (!pos) | ||
4292 | return -ENOTTY; | ||
4293 | |||
4294 | pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap); | ||
4295 | pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl); | ||
4296 | |||
4297 | ctrl |= (cap & PCI_ACS_SV); | ||
4298 | ctrl |= (cap & PCI_ACS_RR); | ||
4299 | ctrl |= (cap & PCI_ACS_CR); | ||
4300 | ctrl |= (cap & PCI_ACS_UF); | ||
4301 | |||
4302 | pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl); | ||
4303 | |||
4304 | dev_info(&dev->dev, "Intel SPT PCH root port ACS workaround enabled\n"); | ||
4305 | |||
4306 | return 0; | ||
4307 | } | ||
4308 | |||
4162 | static const struct pci_dev_enable_acs { | 4309 | static const struct pci_dev_enable_acs { |
4163 | u16 vendor; | 4310 | u16 vendor; |
4164 | u16 device; | 4311 | u16 device; |
4165 | int (*enable_acs)(struct pci_dev *dev); | 4312 | int (*enable_acs)(struct pci_dev *dev); |
4166 | } pci_dev_enable_acs[] = { | 4313 | } pci_dev_enable_acs[] = { |
4167 | { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_pch_acs }, | 4314 | { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_pch_acs }, |
4315 | { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_spt_pch_acs }, | ||
4168 | { 0 } | 4316 | { 0 } |
4169 | }; | 4317 | }; |
4170 | 4318 | ||
4171 | void pci_dev_specific_enable_acs(struct pci_dev *dev) | 4319 | int pci_dev_specific_enable_acs(struct pci_dev *dev) |
4172 | { | 4320 | { |
4173 | const struct pci_dev_enable_acs *i; | 4321 | const struct pci_dev_enable_acs *i; |
4174 | int ret; | 4322 | int ret; |
@@ -4180,9 +4328,11 @@ void pci_dev_specific_enable_acs(struct pci_dev *dev) | |||
4180 | i->device == (u16)PCI_ANY_ID)) { | 4328 | i->device == (u16)PCI_ANY_ID)) { |
4181 | ret = i->enable_acs(dev); | 4329 | ret = i->enable_acs(dev); |
4182 | if (ret >= 0) | 4330 | if (ret >= 0) |
4183 | return; | 4331 | return ret; |
4184 | } | 4332 | } |
4185 | } | 4333 | } |
4334 | |||
4335 | return -ENOTTY; | ||
4186 | } | 4336 | } |
4187 | 4337 | ||
4188 | /* | 4338 | /* |
diff --git a/drivers/pci/search.c b/drivers/pci/search.c index a20ce7d5e2a7..33e0f033a48e 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c | |||
@@ -40,11 +40,15 @@ int pci_for_each_dma_alias(struct pci_dev *pdev, | |||
40 | * If the device is broken and uses an alias requester ID for | 40 | * If the device is broken and uses an alias requester ID for |
41 | * DMA, iterate over that too. | 41 | * DMA, iterate over that too. |
42 | */ | 42 | */ |
43 | if (unlikely(pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN)) { | 43 | if (unlikely(pdev->dma_alias_mask)) { |
44 | ret = fn(pdev, PCI_DEVID(pdev->bus->number, | 44 | u8 devfn; |
45 | pdev->dma_alias_devfn), data); | 45 | |
46 | if (ret) | 46 | for_each_set_bit(devfn, pdev->dma_alias_mask, U8_MAX) { |
47 | return ret; | 47 | ret = fn(pdev, PCI_DEVID(pdev->bus->number, devfn), |
48 | data); | ||
49 | if (ret) | ||
50 | return ret; | ||
51 | } | ||
48 | } | 52 | } |
49 | 53 | ||
50 | for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) { | 54 | for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) { |
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c index 799634b382c6..1146ff4210a9 100644 --- a/drivers/thunderbolt/ctl.c +++ b/drivers/thunderbolt/ctl.c | |||
@@ -249,7 +249,7 @@ static void tb_cfg_print_error(struct tb_ctl *ctl, | |||
249 | * cfg_read/cfg_write. | 249 | * cfg_read/cfg_write. |
250 | */ | 250 | */ |
251 | tb_ctl_WARN(ctl, | 251 | tb_ctl_WARN(ctl, |
252 | "CFG_ERROR(%llx:%x): Invalid config space of offset\n", | 252 | "CFG_ERROR(%llx:%x): Invalid config space or offset\n", |
253 | res->response_route, res->response_port); | 253 | res->response_route, res->response_port); |
254 | return; | 254 | return; |
255 | case TB_CFG_ERROR_NO_SUCH_PORT: | 255 | case TB_CFG_ERROR_NO_SUCH_PORT: |
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c index 0dde34e3a7c5..0c052e25c5bc 100644 --- a/drivers/thunderbolt/eeprom.c +++ b/drivers/thunderbolt/eeprom.c | |||
@@ -221,7 +221,7 @@ struct tb_drom_entry_port { | |||
221 | u8 micro1:4; | 221 | u8 micro1:4; |
222 | u8 micro3; | 222 | u8 micro3; |
223 | 223 | ||
224 | /* BYTES 5-6, TODO: verify (find hardware that has these set) */ | 224 | /* BYTES 6-7, TODO: verify (find hardware that has these set) */ |
225 | u8 peer_port_rid:4; | 225 | u8 peer_port_rid:4; |
226 | u8 unknown3:3; | 226 | u8 unknown3:3; |
227 | bool has_peer_port:1; | 227 | bool has_peer_port:1; |
@@ -388,6 +388,11 @@ int tb_drom_read(struct tb_switch *sw) | |||
388 | sw->ports[4].link_nr = 1; | 388 | sw->ports[4].link_nr = 1; |
389 | sw->ports[3].dual_link_port = &sw->ports[4]; | 389 | sw->ports[3].dual_link_port = &sw->ports[4]; |
390 | sw->ports[4].dual_link_port = &sw->ports[3]; | 390 | sw->ports[4].dual_link_port = &sw->ports[3]; |
391 | |||
392 | /* Port 5 is inaccessible on this gen 1 controller */ | ||
393 | if (sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE) | ||
394 | sw->ports[5].disabled = true; | ||
395 | |||
391 | return 0; | 396 | return 0; |
392 | } | 397 | } |
393 | 398 | ||
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index 20a41f7de76f..9c15344b657a 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c | |||
@@ -37,7 +37,8 @@ static int ring_interrupt_index(struct tb_ring *ring) | |||
37 | */ | 37 | */ |
38 | static void ring_interrupt_active(struct tb_ring *ring, bool active) | 38 | static void ring_interrupt_active(struct tb_ring *ring, bool active) |
39 | { | 39 | { |
40 | int reg = REG_RING_INTERRUPT_BASE + ring_interrupt_index(ring) / 32; | 40 | int reg = REG_RING_INTERRUPT_BASE + |
41 | ring_interrupt_index(ring) / 32 * 4; | ||
41 | int bit = ring_interrupt_index(ring) & 31; | 42 | int bit = ring_interrupt_index(ring) & 31; |
42 | int mask = 1 << bit; | 43 | int mask = 1 << bit; |
43 | u32 old, new; | 44 | u32 old, new; |
@@ -564,7 +565,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
564 | /* cannot fail - table is allocated bin pcim_iomap_regions */ | 565 | /* cannot fail - table is allocated bin pcim_iomap_regions */ |
565 | nhi->iobase = pcim_iomap_table(pdev)[0]; | 566 | nhi->iobase = pcim_iomap_table(pdev)[0]; |
566 | nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff; | 567 | nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff; |
567 | if (nhi->hop_count != 12) | 568 | if (nhi->hop_count != 12 && nhi->hop_count != 32) |
568 | dev_warn(&pdev->dev, "unexpected hop count: %d\n", | 569 | dev_warn(&pdev->dev, "unexpected hop count: %d\n", |
569 | nhi->hop_count); | 570 | nhi->hop_count); |
570 | INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work); | 571 | INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work); |
@@ -633,16 +634,24 @@ static const struct dev_pm_ops nhi_pm_ops = { | |||
633 | static struct pci_device_id nhi_ids[] = { | 634 | static struct pci_device_id nhi_ids[] = { |
634 | /* | 635 | /* |
635 | * We have to specify class, the TB bridges use the same device and | 636 | * We have to specify class, the TB bridges use the same device and |
636 | * vendor (sub)id. | 637 | * vendor (sub)id on gen 1 and gen 2 controllers. |
637 | */ | 638 | */ |
638 | { | 639 | { |
639 | .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, | 640 | .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, |
640 | .vendor = PCI_VENDOR_ID_INTEL, .device = 0x1547, | 641 | .vendor = PCI_VENDOR_ID_INTEL, |
642 | .device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE, | ||
641 | .subvendor = 0x2222, .subdevice = 0x1111, | 643 | .subvendor = 0x2222, .subdevice = 0x1111, |
642 | }, | 644 | }, |
643 | { | 645 | { |
644 | .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, | 646 | .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, |
645 | .vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c, | 647 | .vendor = PCI_VENDOR_ID_INTEL, |
648 | .device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, | ||
649 | .subvendor = 0x2222, .subdevice = 0x1111, | ||
650 | }, | ||
651 | { | ||
652 | .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, | ||
653 | .vendor = PCI_VENDOR_ID_INTEL, | ||
654 | .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI, | ||
646 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, | 655 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, |
647 | }, | 656 | }, |
648 | { 0,} | 657 | { 0,} |
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index aeb982969629..1e116f53d6dd 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c | |||
@@ -293,9 +293,9 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active) | |||
293 | if (active) { | 293 | if (active) { |
294 | data = data & 0xFFFFFF83; | 294 | data = data & 0xFFFFFF83; |
295 | switch (sw->config.device_id) { | 295 | switch (sw->config.device_id) { |
296 | case 0x1513: | 296 | case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: |
297 | case 0x151a: | 297 | case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE: |
298 | case 0x1549: | 298 | case PCI_DEVICE_ID_INTEL_PORT_RIDGE: |
299 | break; | 299 | break; |
300 | default: | 300 | default: |
301 | data |= 4; | 301 | data |= 4; |
@@ -350,7 +350,7 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route) | |||
350 | return NULL; | 350 | return NULL; |
351 | 351 | ||
352 | sw->tb = tb; | 352 | sw->tb = tb; |
353 | if (tb_cfg_read(tb->ctl, &sw->config, route, 0, 2, 0, 5)) | 353 | if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5)) |
354 | goto err; | 354 | goto err; |
355 | tb_info(tb, | 355 | tb_info(tb, |
356 | "initializing Switch at %#llx (depth: %d, up port: %d)\n", | 356 | "initializing Switch at %#llx (depth: %d, up port: %d)\n", |
@@ -370,7 +370,9 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route) | |||
370 | tb_sw_warn(sw, "unknown switch vendor id %#x\n", | 370 | tb_sw_warn(sw, "unknown switch vendor id %#x\n", |
371 | sw->config.vendor_id); | 371 | sw->config.vendor_id); |
372 | 372 | ||
373 | if (sw->config.device_id != 0x1547 && sw->config.device_id != 0x1549) | 373 | if (sw->config.device_id != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE && |
374 | sw->config.device_id != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C && | ||
375 | sw->config.device_id != PCI_DEVICE_ID_INTEL_PORT_RIDGE) | ||
374 | tb_sw_warn(sw, "unsupported switch device id %#x\n", | 376 | tb_sw_warn(sw, "unsupported switch device id %#x\n", |
375 | sw->config.device_id); | 377 | sw->config.device_id); |
376 | 378 | ||
@@ -425,9 +427,9 @@ err: | |||
425 | } | 427 | } |
426 | 428 | ||
427 | /** | 429 | /** |
428 | * tb_sw_set_unpplugged() - set is_unplugged on switch and downstream switches | 430 | * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches |
429 | */ | 431 | */ |
430 | void tb_sw_set_unpplugged(struct tb_switch *sw) | 432 | void tb_sw_set_unplugged(struct tb_switch *sw) |
431 | { | 433 | { |
432 | int i; | 434 | int i; |
433 | if (sw == sw->tb->root_switch) { | 435 | if (sw == sw->tb->root_switch) { |
@@ -441,7 +443,7 @@ void tb_sw_set_unpplugged(struct tb_switch *sw) | |||
441 | sw->is_unplugged = true; | 443 | sw->is_unplugged = true; |
442 | for (i = 0; i <= sw->config.max_port_number; i++) { | 444 | for (i = 0; i <= sw->config.max_port_number; i++) { |
443 | if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote) | 445 | if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote) |
444 | tb_sw_set_unpplugged(sw->ports[i].remote->sw); | 446 | tb_sw_set_unplugged(sw->ports[i].remote->sw); |
445 | } | 447 | } |
446 | } | 448 | } |
447 | 449 | ||
@@ -483,7 +485,7 @@ int tb_switch_resume(struct tb_switch *sw) | |||
483 | || tb_switch_resume(port->remote->sw)) { | 485 | || tb_switch_resume(port->remote->sw)) { |
484 | tb_port_warn(port, | 486 | tb_port_warn(port, |
485 | "lost during suspend, disconnecting\n"); | 487 | "lost during suspend, disconnecting\n"); |
486 | tb_sw_set_unpplugged(port->remote->sw); | 488 | tb_sw_set_unplugged(port->remote->sw); |
487 | } | 489 | } |
488 | } | 490 | } |
489 | return 0; | 491 | return 0; |
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index d2c3fe346e91..24b6d30c3c86 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c | |||
@@ -246,7 +246,7 @@ static void tb_handle_hotplug(struct work_struct *work) | |||
246 | if (ev->unplug) { | 246 | if (ev->unplug) { |
247 | if (port->remote) { | 247 | if (port->remote) { |
248 | tb_port_info(port, "unplugged\n"); | 248 | tb_port_info(port, "unplugged\n"); |
249 | tb_sw_set_unpplugged(port->remote->sw); | 249 | tb_sw_set_unplugged(port->remote->sw); |
250 | tb_free_invalid_tunnels(tb); | 250 | tb_free_invalid_tunnels(tb); |
251 | tb_switch_free(port->remote->sw); | 251 | tb_switch_free(port->remote->sw); |
252 | port->remote = NULL; | 252 | port->remote = NULL; |
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 8b0d7cf2b6d6..61d57ba64035 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h | |||
@@ -226,7 +226,7 @@ void tb_switch_free(struct tb_switch *sw); | |||
226 | void tb_switch_suspend(struct tb_switch *sw); | 226 | void tb_switch_suspend(struct tb_switch *sw); |
227 | int tb_switch_resume(struct tb_switch *sw); | 227 | int tb_switch_resume(struct tb_switch *sw); |
228 | int tb_switch_reset(struct tb *tb, u64 route); | 228 | int tb_switch_reset(struct tb *tb, u64 route); |
229 | void tb_sw_set_unpplugged(struct tb_switch *sw); | 229 | void tb_sw_set_unplugged(struct tb_switch *sw); |
230 | struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route); | 230 | struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route); |
231 | 231 | ||
232 | int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged); | 232 | int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged); |
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index 6577af75d9dc..1e2a4a8046be 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h | |||
@@ -30,7 +30,7 @@ enum tb_cap { | |||
30 | TB_CAP_I2C = 0x0005, | 30 | TB_CAP_I2C = 0x0005, |
31 | TB_CAP_PLUG_EVENTS = 0x0105, /* also EEPROM */ | 31 | TB_CAP_PLUG_EVENTS = 0x0105, /* also EEPROM */ |
32 | TB_CAP_TIME2 = 0x0305, | 32 | TB_CAP_TIME2 = 0x0305, |
33 | TB_CAL_IECS = 0x0405, | 33 | TB_CAP_IECS = 0x0405, |
34 | TB_CAP_LINK_CONTROLLER = 0x0605, /* also IECS */ | 34 | TB_CAP_LINK_CONTROLLER = 0x0605, /* also IECS */ |
35 | }; | 35 | }; |
36 | 36 | ||