author     Manikanta Maddireddy <mmaddireddy@nvidia.com>	2018-09-21 04:35:16 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>	2018-11-24 04:20:22 -0500
commit     f723fb69fc8c56c2850f7a4c16eb5c2b6dbf6246 (patch)
tree       ae8ee09202edc9ead7a3f9c7f9a70238b41e48e2
parent     55b87f9545bc9faa66ef9621b2e701b3ae7203d5 (diff)
PCI: tegra: Do controller deinit on PERST# assert
Whenever PERST# is asserted, deinitialize the controller and power down
the PLL. Also set the PERST# GPIO debounce time to 5 msec to filter out
any spurious interrupts caused by transient glitches on PERST#.
bug 200434194
Change-Id: I3fbe1e0f3c316523c1ab0f863abc48794d0d8403
Signed-off-by: Manikanta Maddireddy <mmaddireddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1816429
(cherry picked from commit 466725862216e8e5dd9f861101758cabe85cd642)
Reviewed-on: https://git-master.nvidia.com/r/1946413
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vidya Sagar <vidyas@nvidia.com>
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r--  drivers/pci/ep/pcie-tegra-dw-ep.c | 209
1 file changed, 179 insertions(+), 30 deletions(-)
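At a glance, the change makes PERST# handling level-sensitive: the GPIO interrupt now fires on both edges, the pin is debounced for 5 ms, and the ISR reads the pin level to queue either an init (deassert) or deinit (assert) event for the worker thread. The sketch below condenses that logic from the diff that follows; the pex_rst_irq_setup() helper is hypothetical and the fifo-full error paths are trimmed, so refer to the actual hunks for the complete code.

/*
 * Condensed sketch only -- pex_rst_irq_setup() is a hypothetical helper and
 * fifo-full handling is omitted; see the full diff below for the real code.
 */
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/wait.h>

#define PERST_DEBOUNCE_TIME	(5 * 1000)	/* in usec, i.e. 5 ms */

static irqreturn_t pex_rst_isr(int irq, void *arg)
{
	struct tegra_pcie_dw_ep *pcie = arg;

	/* High level: PERST# deasserted (init); low level: PERST# asserted (deinit) */
	if (gpio_get_value(pcie->pex_rst_gpio))
		kfifo_put(&pcie->event_fifo, EP_PEX_RST_DEASSERT);
	else
		kfifo_put(&pcie->event_fifo, EP_PEX_RST_ASSERT);

	wake_up(&pcie->wq);
	return IRQ_HANDLED;
}

static int pex_rst_irq_setup(struct tegra_pcie_dw_ep *pcie)
{
	int irq, ret;

	/* Debounce PERST# for 5 ms to filter transient glitches */
	ret = gpiod_set_debounce(gpio_to_desc(pcie->pex_rst_gpio),
				 PERST_DEBOUNCE_TIME);
	if (ret < 0)
		return ret;

	irq = gpio_to_irq(pcie->pex_rst_gpio);
	if (irq < 0)
		return irq;

	/* Trigger on both edges: rising => deassert event, falling => assert event */
	return devm_request_irq(pcie->dev, irq, pex_rst_isr,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				"pex_rst", pcie);
}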
diff --git a/drivers/pci/ep/pcie-tegra-dw-ep.c b/drivers/pci/ep/pcie-tegra-dw-ep.c
index 5071f4738..68ff635a6 100644
--- a/drivers/pci/ep/pcie-tegra-dw-ep.c
+++ b/drivers/pci/ep/pcie-tegra-dw-ep.c
@@ -18,6 +18,7 @@
 #include <linux/reset.h>
 #include <linux/delay.h>
 #include <linux/gpio.h>
+#include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/kthread.h>
 #include <linux/module.h>
@@ -99,6 +100,11 @@
 #define LTR_MSG_REQ BIT(15)
 #define LTR_MST_NO_SNOOP_SHIFT 16
 
+#define APPL_DEBUG 0xd0
+#define APPL_DEBUG_LTSSM_STATE_MASK 0x1f8
+#define APPL_DEBUG_LTSSM_STATE_SHIFT 3
+#define LTSSM_STATE_PRE_DETECT 0x5
+
 #define APPL_DM_TYPE 0x100
 #define APPL_DM_TYPE_MASK 0xF
 #define APPL_DM_TYPE_EP 0x0
@@ -235,6 +241,8 @@
 #define TSA_CONFIG_STATIC0_CSW_PCIE5W_0_SO_DEV_HUBID_HUB2 (2)
 
 #define LTR_MSG_TIMEOUT (100 * 1000)
+#define LTSSM_TIMEOUT 50
+#define PERST_DEBOUNCE_TIME (5 * 1000)
 
 #define EVENT_QUEUE_LEN (256)
 
@@ -270,7 +278,8 @@
 
 enum ep_event {
	EP_EVENT_NONE = 0,
-	EP_PEX_RST_DE_ASSERT,
+	EP_PEX_RST_DEASSERT,
+	EP_PEX_RST_ASSERT,
	EP_PEX_HOT_RST_DONE,
	EP_PEX_BME_CHANGE,
	EP_EVENT_EXIT,
@@ -293,21 +302,22 @@ struct margin_cmd {
	int rxm_payload_check;
	int rxm_cmd_check;
 };
-
 struct tegra_pcie_dw_ep {
	struct device *dev;
	struct resource *appl_res;
	struct resource *dbi_res;
	struct resource *atu_dma_res;
	void __iomem *appl_base;
	void __iomem *dbi_base;
	void __iomem *atu_dma_base;
	struct clk *core_clk;
	struct reset_control *core_apb_rst;
	struct reset_control *core_rst;
	int irq;
	int phy_count;
-	struct phy **phy;
+	int pex_rst_gpio;
+	int ep_state;
+	struct phy **phy;
	struct task_struct *pcie_ep_task;
	wait_queue_head_t wq;
	DECLARE_KFIFO(event_fifo, u32, EVENT_QUEUE_LEN);
@@ -336,6 +346,9 @@ struct tegra_pcie_dw_ep {
	u32 margin_lane_cntrl;
 };
 
+#define EP_STATE_DISABLED 0
+#define EP_STATE_ENABLED 1
+
 static unsigned int pcie_emc_client_id[] = {
	TEGRA_BWMGR_CLIENT_PCIE,
	TEGRA_BWMGR_CLIENT_PCIE_1,
@@ -390,7 +403,7 @@ static irqreturn_t tegra_pcie_irq_handler(int irq, void *arg)
		/* clear any stale PEX_RST interrupt */
		writel(APPL_INTR_STATUS_L0_PEX_RST_INT,
		       pcie->appl_base + APPL_INTR_STATUS_L0);
-		if (!kfifo_put(&pcie->event_fifo, EP_PEX_RST_DE_ASSERT)) {
+		if (!kfifo_put(&pcie->event_fifo, EP_PEX_RST_DEASSERT)) {
			dev_err(pcie->dev, "EVENT: fifo is full\n");
			return IRQ_HANDLED;
		}
@@ -472,6 +485,17 @@ static int uphy_bpmp_pcie_ep_controller_pll_init(u32 id)
	return bpmp_send_uphy_message(&req, sizeof(req), &resp, sizeof(resp));
 }
 
+static int uphy_bpmp_pcie_ep_controller_pll_off(u32 id)
+{
+	struct mrq_uphy_request req;
+	struct mrq_uphy_response resp;
+
+	req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
+	req.ep_ctrlr_pll_off.ep_controller = id;
+
+	return bpmp_send_uphy_message(&req, sizeof(req), &resp, sizeof(resp));
+}
+
 static int uphy_bpmp_pcie_controller_state_set(int controller, int enable)
 {
	struct mrq_uphy_request req;
@@ -550,33 +574,91 @@ static void program_gen3_gen4_eq_presets(struct tegra_pcie_dw_ep *pcie)
	writel(val, pcie->dbi_base + GEN3_RELATED_OFF);
 }
 
+static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw_ep *pcie)
+{
+	u32 val = 0;
+	int ret = 0, count = 0;
+
+	if (pcie->ep_state == EP_STATE_DISABLED)
+		return;
+
+	/* disable LTSSM */
+	val = readl(pcie->appl_base + APPL_CTRL);
+	val &= ~APPL_CTRL_LTSSM_EN;
+	writel(val, pcie->appl_base + APPL_CTRL);
+
+	ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
+				 ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
+				 APPL_DEBUG_LTSSM_STATE_SHIFT) ==
+				 LTSSM_STATE_PRE_DETECT,
+				 1, LTSSM_TIMEOUT);
+	if (ret)
+		dev_info(pcie->dev, "Link didn't go to detect state\n");
+
+	reset_control_assert(pcie->core_rst);
+
+	for (count = 0; count < pcie->phy_count; count++)
+		phy_power_off(pcie->phy[count]);
+
+	reset_control_assert(pcie->core_apb_rst);
+	clk_disable_unprepare(pcie->core_clk);
+
+	/*
+	 * If PCIe partition is ungated it will request PLL power ON,
+	 * so PLL sequencer will be in SEQ_ON state. To turn off the
+	 * PLL sequencer, power gate PCIe partition.
+	 */
+	ret = pm_runtime_put_sync(pcie->dev);
+	if (ret < 0)
+		dev_err(pcie->dev, "runtime suspend failed: %d\n", ret);
+
+	if (!(pcie->cid == CTRL_4 && pcie->num_lanes == 1)) {
+		/* Resets PLL CAL_VALID and RCAL_VALID */
+		ret = uphy_bpmp_pcie_ep_controller_pll_off(pcie->cid);
+		if (ret)
+			dev_err(pcie->dev, "UPHY off failed for PCIe EP:%d\n",
+				ret);
+	}
+
+	pcie->ep_state = EP_STATE_DISABLED;
+	dev_info(pcie->dev, "EP deinit done\n");
+}
+
 static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw_ep *pcie)
 {
	u32 val = 0;
	int ret = 0;
 
+	if (pcie->ep_state == EP_STATE_ENABLED)
+		return;
+
+	ret = pm_runtime_get_sync(pcie->dev);
+	if (ret < 0) {
+		dev_err(pcie->dev, "runtime resume failed: %d\n", ret);
+		return;
+	}
+
	if (!(pcie->cid == CTRL_4 && pcie->num_lanes == 1)) {
		ret = uphy_bpmp_pcie_ep_controller_pll_init(pcie->cid);
		if (ret) {
			dev_err(pcie->dev, "UPHY init failed for PCIe EP:%d\n",
				ret);
-			return;
+			goto pll_fail;
		}
	}
 
	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable core clock\n");
-		return;
+		goto pll_fail;
	}
 
-	reset_control_assert(pcie->core_apb_rst);
	reset_control_deassert(pcie->core_apb_rst);
 
	ret = tegra_pcie_power_on_phy(pcie);
	if (ret) {
		dev_err(pcie->dev, "failed to power_on phy\n");
-		return;
+		goto phy_fail;
	}
 
	/* clear any stale interrupt statuses */
@@ -647,7 +729,6 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw_ep *pcie)
	val |= APPL_INTR_EN_L1_0_HOT_RESET_DONE_INT_EN;
	writel(val, pcie->appl_base + APPL_INTR_EN_L1_0);
 
-	reset_control_assert(pcie->core_rst);
	reset_control_deassert(pcie->core_rst);
 
	/* FPGA specific PHY initialization */
@@ -764,6 +845,21 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw_ep *pcie)
	val = readl(pcie->appl_base + APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	writel(val, pcie->appl_base + APPL_CTRL);
+
+	pcie->ep_state = EP_STATE_ENABLED;
+	dev_info(pcie->dev, "EP init done\n");
+
+	return;
+
+phy_fail:
+	reset_control_assert(pcie->core_apb_rst);
+	clk_disable_unprepare(pcie->core_clk);
+pll_fail:
+	ret = pm_runtime_put_sync(pcie->dev);
+	if (ret < 0)
+		dev_err(pcie->dev, "runtime suspend failed: %d\n", ret);
+
+	return;
 }
 
 static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw_ep *pcie)
@@ -864,11 +960,16 @@ static int pcie_ep_work_thread(void *p)
		}
 
		switch (event) {
-		case EP_PEX_RST_DE_ASSERT:
-			dev_dbg(pcie->dev, "EP_EVENT: EP_PEX_RST_DE_ASSERT\n");
+		case EP_PEX_RST_DEASSERT:
+			dev_dbg(pcie->dev, "EP_EVENT: EP_PEX_RST_DEASSERT\n");
			pex_ep_event_pex_rst_deassert(pcie);
			break;
 
+		case EP_PEX_RST_ASSERT:
+			dev_dbg(pcie->dev, "EP_EVENT: EP_PEX_RST_ASSERT\n");
+			pex_ep_event_pex_rst_assert(pcie);
+			break;
+
		case EP_PEX_HOT_RST_DONE:
			dev_dbg(pcie->dev, "EP_EVENT: EP_PEX_HOT_RST_DONE\n");
			pex_ep_event_hot_rst_done(pcie);
@@ -947,10 +1048,20 @@ static irqreturn_t pex_rst_isr(int irq, void *arg)
 {
	struct tegra_pcie_dw_ep *pcie = arg;
 
-	if (!kfifo_put(&pcie->event_fifo, EP_PEX_RST_DE_ASSERT)) {
-		dev_err(pcie->dev, "EVENT: fifo is full\n");
-		return IRQ_HANDLED;
+	if (gpio_get_value(pcie->pex_rst_gpio)) {
+		dev_dbg(pcie->dev, "EVENT: EP_PEX_RST_DEASSERT\n");
+		if (!kfifo_put(&pcie->event_fifo, EP_PEX_RST_DEASSERT)) {
+			dev_err(pcie->dev, "EVENT: fifo is full\n");
+			return IRQ_HANDLED;
+		}
+	} else {
+		dev_dbg(pcie->dev, "EVENT: EP_PEX_RST_ASSERT\n");
+		if (!kfifo_put(&pcie->event_fifo, EP_PEX_RST_ASSERT)) {
+			dev_err(pcie->dev, "EVENT: fifo is full\n");
+			return IRQ_HANDLED;
+		}
	}
+
	wake_up(&pcie->wq);
	return IRQ_HANDLED;
 }
@@ -1300,10 +1411,10 @@ static int tegra_pcie_dw_ep_probe(struct platform_device *pdev)
	struct phy **phy;
	struct pinctrl *pin = NULL;
	struct pinctrl_state *pin_state = NULL;
+	struct gpio_desc *gpiod;
	char *name;
	int phy_count;
	u32 i = 0, val = 0, addr = 0;
-	int pex_rst_gpio;
	int irq;
	int ret = 0;
 
@@ -1312,6 +1423,7 @@ static int tegra_pcie_dw_ep_probe(struct platform_device *pdev)
		return -ENOMEM;
 
	pcie->dev = &pdev->dev;
+	pcie->ep_state = EP_STATE_DISABLED;
 
	ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
	if (ret < 0) {
@@ -1583,29 +1695,42 @@
		goto fail_thread;
	}
 
-	pex_rst_gpio = of_get_named_gpio(np, "nvidia,pex-rst-gpio", 0);
-	if (!gpio_is_valid(pex_rst_gpio)) {
+	pcie->pex_rst_gpio = of_get_named_gpio(np, "nvidia,pex-rst-gpio", 0);
+	if (!gpio_is_valid(pcie->pex_rst_gpio)) {
		dev_err(pcie->dev, "pex-rst-gpio is missing\n");
-		ret = pex_rst_gpio;
+		ret = pcie->pex_rst_gpio;
		goto fail_thread;
	}
-	ret = devm_gpio_request(pcie->dev, pex_rst_gpio, "pex_rst_gpio");
+	ret = devm_gpio_request(pcie->dev, pcie->pex_rst_gpio, "pex_rst_gpio");
	if (ret < 0) {
		dev_err(pcie->dev, "pex_rst_gpio request failed\n");
		goto fail_thread;
	}
-	ret = gpio_direction_input(pex_rst_gpio);
+	ret = gpio_direction_input(pcie->pex_rst_gpio);
	if (ret < 0) {
		dev_err(pcie->dev, "pex_rst_gpio direction input failed\n");
		goto fail_thread;
	}
-	irq = gpio_to_irq(pex_rst_gpio);
+	gpiod = gpio_to_desc(pcie->pex_rst_gpio);
+	if (!gpiod) {
+		dev_err(pcie->dev, "Unable to get gpio desc\n");
+		ret = -EINVAL;
+		goto fail_thread;
+	}
+	ret = gpiod_set_debounce(gpiod, PERST_DEBOUNCE_TIME);
+	if (ret < 0) {
+		dev_err(pcie->dev, "Unable to set gpio debounce time\n");
+		goto fail_thread;
+	}
+	irq = gpio_to_irq(pcie->pex_rst_gpio);
	if (irq < 0) {
		dev_err(pcie->dev, "Unable to get irq for pex_rst_gpio\n");
+		ret = irq;
		goto fail_thread;
	}
	ret = devm_request_irq(pcie->dev, (unsigned int)irq, pex_rst_isr,
-			       IRQF_TRIGGER_RISING, "pex_rst", (void *)pcie);
+			       IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+			       "pex_rst", (void *)pcie);
	if (ret < 0) {
		dev_err(pcie->dev, "Unable to request irq for pex_rst\n");
		goto fail_thread;
@@ -1624,6 +1749,7 @@ static int tegra_pcie_dw_ep_probe(struct platform_device *pdev)
	kfree(name);
 
	platform_set_drvdata(pdev, pcie);
+	pm_runtime_enable(pcie->dev);
 
	return ret;
 
@@ -1676,12 +1802,35 @@ static const struct of_device_id tegra_pcie_dw_ep_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, tegra_pcie_dw_ep_of_match);
 
+/*
+ * Powergate driver registers gate/ungate callback functions to power domain.
+ * PCIe EP driver need to register runtime pm callback functions to gate/ungate
+ * power partition and there is no other work to do in these functions.
+ */
+static int tegra_pcie_dw_ep_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int tegra_pcie_dw_ep_runtime_resume(struct device *dev)
+{
+	return 0;
+}
+
+static const struct dev_pm_ops tegra_pcie_dw_ep_pm_ops = {
+	.runtime_suspend = tegra_pcie_dw_ep_runtime_suspend,
+	.runtime_resume = tegra_pcie_dw_ep_runtime_resume,
+};
+
 static struct platform_driver tegra_pcie_dw_ep_driver = {
	.probe = tegra_pcie_dw_ep_probe,
	.remove = tegra_pcie_dw_ep_remove,
	.driver = {
		.name = "tegra-pcie-dw-ep",
		.of_match_table = tegra_pcie_dw_ep_of_match,
+#ifdef CONFIG_PM
+		.pm = &tegra_pcie_dw_ep_pm_ops,
+#endif
	},
 };
 