Diffstat (limited to 'drivers/scsi/ufs/ufshcd.c')
 drivers/scsi/ufs/ufshcd.c | 2514 ++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 2326 insertions(+), 188 deletions(-)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index ba27215b8034..497c38a4a866 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -3,6 +3,7 @@
  *
  * This code is based on drivers/scsi/ufs/ufshcd.c
  * Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  *
  * Authors:
  *	Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -31,16 +32,19 @@
  * circumstances will the contributor of this Program be liable for
  * any damages of any kind arising from your use or distribution of
  * this program.
+ *
+ * The Linux Foundation chooses to take subject only to the GPLv2
+ * license terms, and distributes only under these terms.
  */
 
 #include <linux/async.h>
+#include <linux/devfreq.h>
 
 #include "ufshcd.h"
 #include "unipro.h"
 
 #define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
-				 UIC_POWER_MODE |\
				 UFSHCD_ERROR_MASK)
 /* UIC command timeout, unit: ms */
 #define UIC_CMD_TIMEOUT	500
@@ -58,16 +62,44 @@
 /* Task management command timeout */
 #define TM_CMD_TIMEOUT	100 /* msecs */
 
+/* maximum number of link-startup retries */
+#define DME_LINKSTARTUP_RETRIES 3
+
+/* maximum number of reset retries before giving up */
+#define MAX_HOST_RESET_RETRIES 5
+
 /* Expose the flag value from utp_upiu_query.value */
 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
 
 /* Interrupt aggregation default timeout, unit: 40us */
 #define INT_AGGR_DEF_TO	0x02
 
+#define ufshcd_toggle_vreg(_dev, _vreg, _on)			\
+	({							\
+		int _ret;					\
+		if (_on)					\
+			_ret = ufshcd_enable_vreg(_dev, _vreg);	\
+		else						\
+			_ret = ufshcd_disable_vreg(_dev, _vreg);\
+		_ret;						\
+	})
+
+static u32 ufs_query_desc_max_size[] = {
+	QUERY_DESC_DEVICE_MAX_SIZE,
+	QUERY_DESC_CONFIGURAION_MAX_SIZE,
+	QUERY_DESC_UNIT_MAX_SIZE,
+	QUERY_DESC_RFU_MAX_SIZE,
+	QUERY_DESC_INTERCONNECT_MAX_SIZE,
+	QUERY_DESC_STRING_MAX_SIZE,
+	QUERY_DESC_RFU_MAX_SIZE,
+	QUERY_DESC_GEOMETRY_MAZ_SIZE,
+	QUERY_DESC_POWER_MAX_SIZE,
+	QUERY_DESC_RFU_MAX_SIZE,
+};
+
 enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
-	UFSHCD_MAX_LUNS		= 8,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
 };
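A note on the ufshcd_toggle_vreg() helper added above: it is a GNU C statement expression, so the whole ({ ... }) block evaluates to its final expression (_ret) and the macro can be used anywhere an int-valued call could. A minimal userspace sketch of the same pattern, with enable_vreg()/disable_vreg() standing in for the driver's regulator helpers (names invented for illustration):

    #include <stdio.h>

    /* Illustrative stand-ins for ufshcd_enable_vreg()/ufshcd_disable_vreg(). */
    static int enable_vreg(const char *name)  { printf("enable %s\n", name);  return 0; }
    static int disable_vreg(const char *name) { printf("disable %s\n", name); return 0; }

    /* Same shape as ufshcd_toggle_vreg(): a statement expression yielding _ret. */
    #define toggle_vreg(_name, _on)				\
    	({						\
    		int _ret;				\
    		if (_on)				\
    			_ret = enable_vreg(_name);	\
    		else					\
    			_ret = disable_vreg(_name);	\
    		_ret;					\
    	})

    int main(void)
    {
    	/* The macro body behaves like a call that returns an int. */
    	int err = toggle_vreg("vcc", 1);
    	return err ? 1 : toggle_vreg("vcc", 0);
    }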
@@ -106,12 +138,79 @@ enum {
 #define ufshcd_clear_eh_in_progress(h) \
	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
 
+#define ufshcd_set_ufs_dev_active(h) \
+	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
+#define ufshcd_set_ufs_dev_sleep(h) \
+	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
+#define ufshcd_set_ufs_dev_poweroff(h) \
+	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
+#define ufshcd_is_ufs_dev_active(h) \
+	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
+#define ufshcd_is_ufs_dev_sleep(h) \
+	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
+#define ufshcd_is_ufs_dev_poweroff(h) \
+	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
+
+static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
+	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
+};
+
+static inline enum ufs_dev_pwr_mode
+ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
+{
+	return ufs_pm_lvl_states[lvl].dev_state;
+}
+
+static inline enum uic_link_state
+ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
+{
+	return ufs_pm_lvl_states[lvl].link_state;
+}
+
 static void ufshcd_tmc_handler(struct ufs_hba *hba);
 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
-static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba,
-				struct scsi_device *sdev);
+static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_probe_hba(struct ufs_hba *hba);
+static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+				 bool skip_ref_clk);
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
+static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
+static irqreturn_t ufshcd_intr(int irq, void *__hba);
+static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+		struct ufs_pa_layer_attr *desired_pwr_mode);
+
+static inline int ufshcd_enable_irq(struct ufs_hba *hba)
+{
+	int ret = 0;
+
+	if (!hba->is_irq_enabled) {
+		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
+				hba);
+		if (ret)
+			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
+				__func__, ret);
+		hba->is_irq_enabled = true;
+	}
+
+	return ret;
+}
+
+static inline void ufshcd_disable_irq(struct ufs_hba *hba)
+{
+	if (hba->is_irq_enabled) {
+		free_irq(hba->irq, hba);
+		hba->is_irq_enabled = false;
+	}
+}
 
 /*
  * ufshcd_wait_for_register - wait for register value to change
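The ufs_pm_lvl_states[] table added here defines the runtime PM policy space: each level pairs a UFS device power mode with a UniPro link state, ordered roughly from shallowest to deepest, and the two inline helpers are plain table lookups. A standalone mirror of the lookup (enum names and values below are illustrative, not the kernel's):

    #include <stdio.h>

    /* Illustrative mirrors of the kernel enums; values assumed. */
    enum dev_pwr_mode { ACTIVE, SLEEP, POWERDOWN };
    enum link_state   { LINK_ACTIVE, LINK_HIBERN8, LINK_OFF };

    struct pm_lvl_state { enum dev_pwr_mode dev; enum link_state link; };

    /* Same ordering as ufs_pm_lvl_states[] in the patch. */
    static const struct pm_lvl_state lvl_states[] = {
    	{ ACTIVE,    LINK_ACTIVE  },	/* lvl 0: everything on */
    	{ ACTIVE,    LINK_HIBERN8 },	/* lvl 1 */
    	{ SLEEP,     LINK_ACTIVE  },	/* lvl 2 */
    	{ SLEEP,     LINK_HIBERN8 },	/* lvl 3 */
    	{ POWERDOWN, LINK_HIBERN8 },	/* lvl 4 */
    	{ POWERDOWN, LINK_OFF     },	/* lvl 5: deepest state */
    };

    int main(void)
    {
    	/* The two helpers in the patch reduce to these table lookups. */
    	int lvl = 3;
    	printf("lvl %d -> dev %d, link %d\n",
    	       lvl, lvl_states[lvl].dev, lvl_states[lvl].link);
    	return 0;
    }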
@@ -175,13 +274,14 @@ static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
 /**
  * ufshcd_is_device_present - Check if any device connected to
  *			      the host controller
- * @reg_hcs - host controller status register value
+ * @hba: pointer to adapter instance
  *
  * Returns 1 if device present, 0 if no device detected
  */
-static inline int ufshcd_is_device_present(u32 reg_hcs)
+static inline int ufshcd_is_device_present(struct ufs_hba *hba)
 {
-	return (DEVICE_PRESENT & reg_hcs) ? 1 : 0;
+	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
+						DEVICE_PRESENT) ? 1 : 0;
 }
 
 /**
@@ -413,6 +513,265 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
 }
 
+static void ufshcd_ungate_work(struct work_struct *work)
+{
+	int ret;
+	unsigned long flags;
+	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+			clk_gating.ungate_work);
+
+	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->clk_gating.state == CLKS_ON) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		goto unblock_reqs;
+	}
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	ufshcd_setup_clocks(hba, true);
+
+	/* Exit from hibern8 */
+	if (ufshcd_can_hibern8_during_gating(hba)) {
+		/* Prevent gating in this path */
+		hba->clk_gating.is_suspended = true;
+		if (ufshcd_is_link_hibern8(hba)) {
+			ret = ufshcd_uic_hibern8_exit(hba);
+			if (ret)
+				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+					__func__, ret);
+			else
+				ufshcd_set_link_active(hba);
+		}
+		hba->clk_gating.is_suspended = false;
+	}
+unblock_reqs:
+	if (ufshcd_is_clkscaling_enabled(hba))
+		devfreq_resume_device(hba->devfreq);
+	scsi_unblock_requests(hba->host);
+}
+
+/**
+ * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
+ * Also, exit from hibern8 mode and set the link as active.
+ * @hba: per adapter instance
+ * @async: This indicates whether caller should ungate clocks asynchronously.
+ */
+int ufshcd_hold(struct ufs_hba *hba, bool async)
+{
+	int rc = 0;
+	unsigned long flags;
+
+	if (!ufshcd_is_clkgating_allowed(hba))
+		goto out;
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->clk_gating.active_reqs++;
+
+start:
+	switch (hba->clk_gating.state) {
+	case CLKS_ON:
+		break;
+	case REQ_CLKS_OFF:
+		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+			hba->clk_gating.state = CLKS_ON;
+			break;
+		}
+		/*
+		 * If we are here, it means gating work is either done or
+		 * currently running. Hence, fall through to cancel gating
+		 * work and to enable clocks.
+		 */
+	case CLKS_OFF:
+		scsi_block_requests(hba->host);
+		hba->clk_gating.state = REQ_CLKS_ON;
+		schedule_work(&hba->clk_gating.ungate_work);
+		/*
+		 * fall through to check if we should wait for this
+		 * work to be done or not.
+		 */
+	case REQ_CLKS_ON:
+		if (async) {
+			rc = -EAGAIN;
+			hba->clk_gating.active_reqs--;
+			break;
+		}
+
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		flush_work(&hba->clk_gating.ungate_work);
+		/* Make sure state is CLKS_ON before returning */
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		goto start;
+	default:
+		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
+			__func__, hba->clk_gating.state);
+		break;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+	return rc;
+}
+
+static void ufshcd_gate_work(struct work_struct *work)
+{
+	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+			clk_gating.gate_work.work);
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->clk_gating.is_suspended) {
+		hba->clk_gating.state = CLKS_ON;
+		goto rel_lock;
+	}
+
+	if (hba->clk_gating.active_reqs
+		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+		|| hba->lrb_in_use || hba->outstanding_tasks
+		|| hba->active_uic_cmd || hba->uic_async_done)
+		goto rel_lock;
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/* put the link into hibern8 mode before turning off clocks */
+	if (ufshcd_can_hibern8_during_gating(hba)) {
+		if (ufshcd_uic_hibern8_enter(hba)) {
+			hba->clk_gating.state = CLKS_ON;
+			goto out;
+		}
+		ufshcd_set_link_hibern8(hba);
+	}
+
+	if (ufshcd_is_clkscaling_enabled(hba)) {
+		devfreq_suspend_device(hba->devfreq);
+		hba->clk_scaling.window_start_t = 0;
+	}
+
+	if (!ufshcd_is_link_active(hba))
+		ufshcd_setup_clocks(hba, false);
+	else
+		/* If link is active, device ref_clk can't be switched off */
+		__ufshcd_setup_clocks(hba, false, true);
+
+	/*
+	 * In case you are here to cancel this work the gating state
+	 * would be marked as REQ_CLKS_ON. In this case keep the state
+	 * as REQ_CLKS_ON, which anyway implies that clocks are off
+	 * and a request to turn them on is pending. By doing this way,
+	 * we keep the state machine intact and ultimately prevent
+	 * running the cancel work multiple times when new requests
+	 * arrive before the current cancel work is done.
+	 */
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->clk_gating.state == REQ_CLKS_OFF)
+		hba->clk_gating.state = CLKS_OFF;
+
+rel_lock:
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+	return;
+}
+
+/* host lock must be held before calling this variant */
+static void __ufshcd_release(struct ufs_hba *hba)
+{
+	if (!ufshcd_is_clkgating_allowed(hba))
+		return;
+
+	hba->clk_gating.active_reqs--;
+
+	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
+		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+		|| hba->lrb_in_use || hba->outstanding_tasks
+		|| hba->active_uic_cmd || hba->uic_async_done)
+		return;
+
+	hba->clk_gating.state = REQ_CLKS_OFF;
+	schedule_delayed_work(&hba->clk_gating.gate_work,
+			msecs_to_jiffies(hba->clk_gating.delay_ms));
+}
+
+void ufshcd_release(struct ufs_hba *hba)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	__ufshcd_release(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
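ufshcd_hold() and ufshcd_release() bracket every hardware access with a reference count: gate_work can only be scheduled once active_reqs drops to zero, and a synchronous hold blocks until the ungate work has the clocks back on. A hypothetical call site inside the driver, sketched under those assumptions (ufshcd_do_something() and its register read are invented for illustration; hold/release are from this patch):

    /* Sketch only: a driver-internal helper that needs the clocks on. */
    static int ufshcd_do_something(struct ufs_hba *hba)
    {
    	u32 status;

    	ufshcd_hold(hba, false);	/* sync: blocks until CLKS_ON */

    	/* Safe to touch host registers here. */
    	status = ufshcd_readl(hba, REG_CONTROLLER_STATUS);

    	ufshcd_release(hba);		/* may arm gate_work after delay_ms */
    	return status ? 0 : -EIO;
    }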
+
+static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
+}
+
+static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned long flags, value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->clk_gating.delay_ms = value;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	return count;
+}
+
+static void ufshcd_init_clk_gating(struct ufs_hba *hba)
+{
+	if (!ufshcd_is_clkgating_allowed(hba))
+		return;
+
+	hba->clk_gating.delay_ms = 150;
+	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
+	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+
+	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
+	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
+	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
+	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
+	hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
+		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+}
+
+static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
+{
+	if (!ufshcd_is_clkgating_allowed(hba))
+		return;
+	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+}
+
+/* Must be called with host lock acquired */
+static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
+{
+	if (!ufshcd_is_clkscaling_enabled(hba))
+		return;
+
+	if (!hba->clk_scaling.is_busy_started) {
+		hba->clk_scaling.busy_start_t = ktime_get();
+		hba->clk_scaling.is_busy_started = true;
+	}
+}
+
+static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
+{
+	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+
+	if (!ufshcd_is_clkscaling_enabled(hba))
+		return;
+
+	if (!hba->outstanding_reqs && scaling->is_busy_started) {
+		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
+					scaling->busy_start_t));
+		scaling->busy_start_t = ktime_set(0, 0);
+		scaling->is_busy_started = false;
+	}
+}
 /**
  * ufshcd_send_command - Send SCSI or device management commands
  * @hba: per adapter instance
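ufshcd_init_clk_gating() defaults the idle timeout to 150 ms and exposes it as a writable clkgate_delay_ms attribute, so the power/latency trade-off can be retuned at runtime without rebuilding the driver. A small userspace sketch of writing the knob (the sysfs path below is illustrative; the real file lives under the UFS host's device directory):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	/* Path is an example; the real parent is the UFS platform device. */
    	const char *path = "/sys/devices/.../clkgate_delay_ms";
    	int fd = open(path, O_WRONLY);

    	if (fd < 0) {
    		perror("open");
    		return 1;
    	}
    	/* kstrtoul() in the store handler accepts a plain decimal string. */
    	if (write(fd, "200", 3) != 3)
    		perror("write");
    	close(fd);
    	return 0;
    }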
@@ -421,6 +780,7 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
 static inline
 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 {
+	ufshcd_clk_scaling_start_busy(hba);
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 }
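The clk_scaling hooks compute a duty cycle for devfreq: ufshcd_clk_scaling_start_busy() stamps the time when the doorbell goes non-empty (the hunk above adds the call on submission) and ufshcd_clk_scaling_update_busy() adds the elapsed span to tot_busy_t when the queue drains (added later in the completion path). The same bookkeeping in a standalone form, with ktime replaced by a CLOCK_MONOTONIC helper for illustration:

    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>

    struct clk_scaling {
    	uint64_t busy_start_us;
    	uint64_t tot_busy_us;
    	bool     is_busy_started;
    };

    static uint64_t now_us(void)
    {
    	struct timespec ts;
    	clock_gettime(CLOCK_MONOTONIC, &ts);
    	return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
    }

    /* Mirrors ufshcd_clk_scaling_start_busy(): stamp on first outstanding req. */
    static void start_busy(struct clk_scaling *s)
    {
    	if (!s->is_busy_started) {
    		s->busy_start_us = now_us();
    		s->is_busy_started = true;
    	}
    }

    /* Mirrors ufshcd_clk_scaling_update_busy(): accumulate when queue drains. */
    static void update_busy(struct clk_scaling *s, unsigned long outstanding)
    {
    	if (!outstanding && s->is_busy_started) {
    		s->tot_busy_us += now_us() - s->busy_start_us;
    		s->busy_start_us = 0;
    		s->is_busy_started = false;
    	}
    }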
@@ -576,15 +936,12 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
  * @uic_cmd: UIC command
  *
  * Identical to ufshcd_send_uic_cmd() except for the mutex. Must be called
- * with mutex held.
+ * with mutex held and host_lock locked.
  * Returns 0 only if success.
  */
 static int
 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 {
-	int ret;
-	unsigned long flags;
-
	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
@@ -593,13 +950,9 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 
	init_completion(&uic_cmd->done);
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_dispatch_uic_cmd(hba, uic_cmd);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-	ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -613,11 +966,19 @@ static int
 ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 {
	int ret;
+	unsigned long flags;
 
+	ufshcd_hold(hba, false);
	mutex_lock(&hba->uic_cmd_mutex);
+	spin_lock_irqsave(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (!ret)
+		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
+
	mutex_unlock(&hba->uic_cmd_mutex);
 
+	ufshcd_release(hba);
	return ret;
 }
 
@@ -867,6 +1228,32 @@ static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
	return ret;
 }
 
+/*
+ * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
+ * @scsi_lun: scsi LUN id
+ *
+ * Returns UPIU LUN id
+ */
+static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
+{
+	if (scsi_is_wlun(scsi_lun))
+		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
+			| UFS_UPIU_WLUN_ID;
+	else
+		return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
+}
+
+/**
+ * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
+ * @upiu_wlun_id: UPIU W-LUN id
+ *
+ * Returns SCSI W-LUN id
+ */
+static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
+{
+	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
+}
+
 /**
  * ufshcd_queuecommand - main entry point for SCSI requests
  * @cmd: command from SCSI Midlayer
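The two mappers above exploit the fact that the UPIU LUN field is eight bits with bit 7 marking a well-known LUN, while the SCSI layer encodes W-LUNs at SCSI_W_LUN_BASE and above. A standalone round-trip check (constant values assumed to match scsi/scsi.h and ufs.h; 0xd0 is used as an example well-known LUN id):

    #include <assert.h>
    #include <stdio.h>

    #define SCSI_W_LUN_BASE		0xc100		/* assumed, from scsi/scsi.h */
    #define UFS_UPIU_WLUN_ID	(1 << 7)	/* assumed, from ufs.h */
    #define UFS_UPIU_MAX_UNIT_NUM_ID 0x7f

    static int scsi_is_wlun(unsigned int lun)
    {
    	return (lun & 0xff00) == SCSI_W_LUN_BASE;
    }

    static unsigned char scsi_to_upiu_lun(unsigned int scsi_lun)
    {
    	if (scsi_is_wlun(scsi_lun))
    		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID) | UFS_UPIU_WLUN_ID;
    	return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
    }

    static unsigned short upiu_wlun_to_scsi_wlun(unsigned char upiu_wlun_id)
    {
    	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
    }

    int main(void)
    {
    	/* Ordinary LU 2 passes through; SCSI W-LUN 0xc150 maps to UPIU 0xd0. */
    	assert(scsi_to_upiu_lun(2) == 2);
    	assert(scsi_to_upiu_lun(0xc150) == 0xd0);
    	assert(upiu_wlun_to_scsi_wlun(0xd0) == 0xc150);
    	printf("LUN round-trip OK\n");
    	return 0;
    }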
@@ -918,6 +1305,14 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
		goto out;
	}
 
+	err = ufshcd_hold(hba, true);
+	if (err) {
+		err = SCSI_MLQUEUE_HOST_BUSY;
+		clear_bit_unlock(tag, &hba->lrb_in_use);
+		goto out;
+	}
+	WARN_ON(hba->clk_gating.state != CLKS_ON);
+
	lrbp = &hba->lrb[tag];
 
	WARN_ON(lrbp->cmd);
@@ -925,7 +1320,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	lrbp->sense_buffer = cmd->sense_buffer;
	lrbp->task_tag = tag;
-	lrbp->lun = cmd->device->lun;
+	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	lrbp->intr_cmd = false;
	lrbp->command_type = UTP_CMD_TYPE_SCSI;
 
@@ -1193,6 +1588,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 
	BUG_ON(!hba);
 
+	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);
@@ -1236,6 +1632,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 
 out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
+	ufshcd_release(hba);
	return err;
 }
 
@@ -1259,6 +1656,7 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 
	BUG_ON(!hba);
 
+	ufshcd_hold(hba, false);
	if (!attr_val) {
		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
				__func__, opcode);
@@ -1298,6 +1696,7 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
 out:
+	ufshcd_release(hba);
	return err;
 }
 
@@ -1325,6 +1724,7 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba,
 
	BUG_ON(!hba);
 
+	ufshcd_hold(hba, false);
	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
				__func__, opcode);
@@ -1374,10 +1774,120 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba,
 out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
 out:
+	ufshcd_release(hba);
	return err;
 }
 
 /**
+ * ufshcd_read_desc_param - read the specified descriptor parameter
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @param_offset: offset of the parameter to read
+ * @param_read_buf: pointer to buffer where parameter would be read
+ * @param_size: sizeof(param_read_buf)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static int ufshcd_read_desc_param(struct ufs_hba *hba,
+				  enum desc_idn desc_id,
+				  int desc_index,
+				  u32 param_offset,
+				  u8 *param_read_buf,
+				  u32 param_size)
+{
+	int ret;
+	u8 *desc_buf;
+	u32 buff_len;
+	bool is_kmalloc = true;
+
+	/* safety checks */
+	if (desc_id >= QUERY_DESC_IDN_MAX)
+		return -EINVAL;
+
+	buff_len = ufs_query_desc_max_size[desc_id];
+	if ((param_offset + param_size) > buff_len)
+		return -EINVAL;
+
+	if (!param_offset && (param_size == buff_len)) {
+		/* memory space already available to hold full descriptor */
+		desc_buf = param_read_buf;
+		is_kmalloc = false;
+	} else {
+		/* allocate memory to hold full descriptor */
+		desc_buf = kmalloc(buff_len, GFP_KERNEL);
+		if (!desc_buf)
+			return -ENOMEM;
+	}
+
+	ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
+				      desc_id, desc_index, 0, desc_buf,
+				      &buff_len);
+
+	if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
+	    (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
+	     ufs_query_desc_max_size[desc_id])
+	    || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
+		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
+			__func__, desc_id, param_offset, buff_len, ret);
+		if (!ret)
+			ret = -EINVAL;
+
+		goto out;
+	}
+
+	if (is_kmalloc)
+		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
+out:
+	if (is_kmalloc)
+		kfree(desc_buf);
+	return ret;
+}
+
+static inline int ufshcd_read_desc(struct ufs_hba *hba,
+				   enum desc_idn desc_id,
+				   int desc_index,
+				   u8 *buf,
+				   u32 size)
+{
+	return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
+}
+
+static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
+					 u8 *buf,
+					 u32 size)
+{
+	return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+}
+
+/**
+ * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
+ * @hba: Pointer to adapter instance
+ * @lun: lun id
+ * @param_offset: offset of the parameter to read
+ * @param_read_buf: pointer to buffer where parameter would be read
+ * @param_size: sizeof(param_read_buf)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
+					      int lun,
+					      enum unit_desc_param param_offset,
+					      u8 *param_read_buf,
+					      u32 param_size)
+{
+	/*
+	 * Unit descriptors are only available for general purpose LUs (LUN id
+	 * from 0 to 7) and RPMB Well known LU.
+	 */
+	if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
+		return -EOPNOTSUPP;
+
+	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
+				      param_offset, param_read_buf, param_size);
+}
+
+/**
  * ufshcd_memory_alloc - allocate memory for host memory space data structures
  * @hba: per adapter instance
  *
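ufshcd_read_desc_param() hides the buffer bookkeeping: it validates the offset against ufs_query_desc_max_size[], borrows the caller's buffer when a whole descriptor is wanted, and otherwise bounces through a kmalloc'd staging buffer. A hypothetical caller pulling one byte out of the device descriptor might look like this (a sketch only; the offset argument stands in for a real enum value not shown in this patch):

    /* Sketch only: fetch a single byte from the DEVICE descriptor. */
    static int ufshcd_read_device_desc_byte(struct ufs_hba *hba, u32 offset,
    					    u8 *val)
    {
    	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE,
    				      0 /* only one device descriptor */,
    				      offset, val, sizeof(*val));
    }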
@@ -1621,44 +2131,54 @@ out:
 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
 
 /**
- * ufshcd_uic_change_pwr_mode - Perform the UIC power mode chage
- *	using DME_SET primitives.
+ * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
+ * state) and waits for it to take effect.
+ *
  * @hba: per adapter instance
- * @mode: powr mode value
+ * @cmd: UIC command to execute
+ *
+ * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
+ * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
+ * and device UniPro link, and hence their final completion is indicated by
+ * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS)
+ * in addition to the normal UIC command completion status (UCCS). This
+ * function only returns after the relevant status bits indicate completion.
  *
  * Returns 0 on success, non-zero value on failure
  */
-static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 {
-	struct uic_command uic_cmd = {0};
-	struct completion pwr_done;
+	struct completion uic_async_done;
	unsigned long flags;
	u8 status;
	int ret;
 
-	uic_cmd.command = UIC_CMD_DME_SET;
-	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
-	uic_cmd.argument3 = mode;
-	init_completion(&pwr_done);
-
	mutex_lock(&hba->uic_cmd_mutex);
+	init_completion(&uic_async_done);
 
	spin_lock_irqsave(hba->host->host_lock, flags);
-	hba->pwr_done = &pwr_done;
+	hba->uic_async_done = &uic_async_done;
+	ret = __ufshcd_send_uic_cmd(hba, cmd);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	ret = __ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret) {
		dev_err(hba->dev,
-			"pwr mode change with mode 0x%x uic error %d\n",
-			mode, ret);
+			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+			cmd->command, cmd->argument3, ret);
+		goto out;
+	}
+	ret = ufshcd_wait_for_uic_cmd(hba, cmd);
+	if (ret) {
+		dev_err(hba->dev,
+			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+			cmd->command, cmd->argument3, ret);
		goto out;
	}
 
-	if (!wait_for_completion_timeout(hba->pwr_done,
+	if (!wait_for_completion_timeout(hba->uic_async_done,
					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		dev_err(hba->dev,
-			"pwr mode change with mode 0x%x completion timeout\n",
-			mode);
+			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
+			cmd->command, cmd->argument3);
		ret = -ETIMEDOUT;
		goto out;
	}
@@ -1666,53 +2186,144 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
	status = ufshcd_get_upmcrs(hba);
	if (status != PWR_LOCAL) {
		dev_err(hba->dev,
-			"pwr mode change failed, host umpcrs:0x%x\n",
-			status);
+			"pwr ctrl cmd 0x%0x failed, host upmcrs:0x%x\n",
+			cmd->command, status);
		ret = (status != PWR_OK) ? status : -1;
	}
 out:
	spin_lock_irqsave(hba->host->host_lock, flags);
-	hba->pwr_done = NULL;
+	hba->uic_async_done = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	mutex_unlock(&hba->uic_cmd_mutex);
+
	return ret;
 }
 
 /**
- * ufshcd_config_max_pwr_mode - Set & Change power mode with
- *	maximum capability attribute information.
+ * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
+ *				using DME_SET primitives.
  * @hba: per adapter instance
+ * @mode: power mode value
  *
  * Returns 0 on success, non-zero value on failure
  */
-static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
+static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
 {
-	enum {RX = 0, TX = 1};
-	u32 lanes[] = {1, 1};
-	u32 gear[] = {1, 1};
-	u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE};
+	struct uic_command uic_cmd = {0};
	int ret;
 
+	uic_cmd.command = UIC_CMD_DME_SET;
+	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
+	uic_cmd.argument3 = mode;
+	ufshcd_hold(hba, false);
+	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+	ufshcd_release(hba);
+
+	return ret;
+}
+
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+	struct uic_command uic_cmd = {0};
+
+	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
+
+	return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+}
+
+static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
+{
+	struct uic_command uic_cmd = {0};
+	int ret;
+
+	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
+	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+	if (ret) {
+		ufshcd_set_link_off(hba);
+		ret = ufshcd_host_reset_and_restore(hba);
+	}
+
+	return ret;
+}
+
+/**
+ * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
+ * @hba: per-adapter instance
+ */
+static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
+{
+	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+
+	if (hba->max_pwr_info.is_valid)
+		return 0;
+
+	pwr_info->pwr_tx = FASTAUTO_MODE;
+	pwr_info->pwr_rx = FASTAUTO_MODE;
+	pwr_info->hs_rate = PA_HS_MODE_B;
+
	/* Get the connected lane count */
-	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]);
-	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
+		       &pwr_info->lane_rx);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+		       &pwr_info->lane_tx);
+
+	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
+		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
+			__func__,
+			pwr_info->lane_rx,
+			pwr_info->lane_tx);
+		return -EINVAL;
+	}
 
	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
-	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]);
-	if (!gear[RX]) {
-		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]);
-		pwr[RX] = SLOWAUTO_MODE;
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
+	if (!pwr_info->gear_rx) {
+		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+			       &pwr_info->gear_rx);
+		if (!pwr_info->gear_rx) {
+			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
+				__func__, pwr_info->gear_rx);
+			return -EINVAL;
+		}
+		pwr_info->pwr_rx = SLOWAUTO_MODE;
	}
 
-	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]);
-	if (!gear[TX]) {
+	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
+			    &pwr_info->gear_tx);
+	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
-				    &gear[TX]);
-		pwr[TX] = SLOWAUTO_MODE;
+				    &pwr_info->gear_tx);
+		if (!pwr_info->gear_tx) {
+			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
+				__func__, pwr_info->gear_tx);
+			return -EINVAL;
+		}
+		pwr_info->pwr_tx = SLOWAUTO_MODE;
+	}
+
+	hba->max_pwr_info.is_valid = true;
+	return 0;
+}
+
+static int ufshcd_change_power_mode(struct ufs_hba *hba,
+				    struct ufs_pa_layer_attr *pwr_mode)
+{
+	int ret;
+
+	/* if already configured to the requested pwr_mode */
+	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
+	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
+	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
+	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
+	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
+	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
+	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
+		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
+		return 0;
	}
 
	/*
@@ -1721,23 +2332,67 @@ static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]);
-	if (pwr[RX] == FASTAUTO_MODE)
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
+		       pwr_mode->lane_rx);
+	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+	    pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
+	else
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
 
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]);
-	if (pwr[TX] == FASTAUTO_MODE)
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
+		       pwr_mode->lane_tx);
+	if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
+	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
+	else
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
 
-	if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE)
-		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B);
+	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
+	    pwr_mode->pwr_rx == FAST_MODE ||
+	    pwr_mode->pwr_tx == FAST_MODE)
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
+			       pwr_mode->hs_rate);
 
-	ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]);
-	if (ret)
+	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
+			| pwr_mode->pwr_tx);
+
+	if (ret) {
		dev_err(hba->dev,
-			"pwr_mode: power mode change failed %d\n", ret);
+			"%s: power mode change failed %d\n", __func__, ret);
+	} else {
+		if (hba->vops && hba->vops->pwr_change_notify)
+			hba->vops->pwr_change_notify(hba,
+				POST_CHANGE, NULL, pwr_mode);
+
+		memcpy(&hba->pwr_info, pwr_mode,
+		       sizeof(struct ufs_pa_layer_attr));
+	}
+
+	return ret;
+}
+
+/**
+ * ufshcd_config_pwr_mode - configure a new power mode
+ * @hba: per-adapter instance
+ * @desired_pwr_mode: desired power configuration
+ */
+static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+		struct ufs_pa_layer_attr *desired_pwr_mode)
+{
+	struct ufs_pa_layer_attr final_params = { 0 };
+	int ret;
+
+	if (hba->vops && hba->vops->pwr_change_notify)
+		hba->vops->pwr_change_notify(hba,
+			PRE_CHANGE, desired_pwr_mode, &final_params);
+	else
+		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
+
+	ret = ufshcd_change_power_mode(hba, &final_params);
 
	return ret;
 }
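ufshcd_config_pwr_mode() deliberately routes every request through the optional vendor pwr_change_notify hook, which may rewrite the requested attributes before ufshcd_change_power_mode() programs the PA layer, and the no-op check avoids re-negotiating an identical mode. A sketch of how a probe path could combine it with ufshcd_get_max_pwr_mode(); the wrapper name is invented and error handling is trimmed:

    /* Sketch only: negotiate the fastest mode both sides advertise. */
    static int ufshcd_tune_to_max_pwr_mode(struct ufs_hba *hba)
    {
    	int ret;

    	ret = ufshcd_get_max_pwr_mode(hba);
    	if (ret)
    		return ret;	/* lane/gear probing failed */

    	/* max_pwr_info was filled in by ufshcd_get_max_pwr_mode(). */
    	return ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
    }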
@@ -1798,11 +2453,10 @@ out:
  * @hba: per adapter instance
  *
  * To bring UFS host controller to operational state,
- * 1. Check if device is present
- * 2. Enable required interrupts
- * 3. Configure interrupt aggregation
- * 4. Program UTRL and UTMRL base addres
- * 5. Configure run-stop-registers
+ * 1. Enable required interrupts
+ * 2. Configure interrupt aggregation
+ * 3. Program UTRL and UTMRL base addresses
+ * 4. Configure run-stop-registers
  *
  * Returns 0 on success, non-zero value on failure
  */
@@ -1811,14 +2465,6 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
	int err = 0;
	u32 reg;
 
-	/* check if device present */
-	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
-	if (!ufshcd_is_device_present(reg)) {
-		dev_err(hba->dev, "cc: Device not present\n");
-		err = -ENXIO;
-		goto out;
-	}
-
	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
 
@@ -1839,6 +2485,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 * DEI, HEI bits must be 0
	 */
+	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
@@ -1885,6 +2532,12 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
		msleep(5);
	}
 
+	/* UniPro link is disabled at this point */
+	ufshcd_set_link_off(hba);
+
+	if (hba->vops && hba->vops->hce_enable_notify)
+		hba->vops->hce_enable_notify(hba, PRE_CHANGE);
+
	/* start controller initialization sequence */
	ufshcd_hba_start(hba);
 
@@ -1912,6 +2565,13 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
		}
		msleep(5);
	}
+
+	/* enable UIC related interrupts */
+	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
+
+	if (hba->vops && hba->vops->hce_enable_notify)
+		hba->vops->hce_enable_notify(hba, POST_CHANGE);
+
	return 0;
 }
 
@@ -1924,16 +2584,42 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
 static int ufshcd_link_startup(struct ufs_hba *hba)
 {
	int ret;
+	int retries = DME_LINKSTARTUP_RETRIES;
 
-	/* enable UIC related interrupts */
-	ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+	do {
+		if (hba->vops && hba->vops->link_startup_notify)
+			hba->vops->link_startup_notify(hba, PRE_CHANGE);
+
+		ret = ufshcd_dme_link_startup(hba);
+
+		/* check if device is detected by inter-connect layer */
+		if (!ret && !ufshcd_is_device_present(hba)) {
+			dev_err(hba->dev, "%s: Device not present\n", __func__);
+			ret = -ENXIO;
+			goto out;
+		}
+
+		/*
+		 * DME link lost indication is only received when link is up,
+		 * but we can't be sure if the link is up until link startup
+		 * succeeds. So reset the local Uni-Pro and try again.
+		 */
+		if (ret && ufshcd_hba_enable(hba))
+			goto out;
+	} while (ret && retries--);
 
-	ret = ufshcd_dme_link_startup(hba);
	if (ret)
+		/* failed to get the link up... retire */
		goto out;
 
-	ret = ufshcd_make_hba_operational(hba);
+	/* Include any host controller configuration via UIC commands */
+	if (hba->vops && hba->vops->link_startup_notify) {
+		ret = hba->vops->link_startup_notify(hba, POST_CHANGE);
+		if (ret)
+			goto out;
+	}
 
+	ret = ufshcd_make_hba_operational(hba);
 out:
	if (ret)
		dev_err(hba->dev, "link startup failed %d\n", ret);
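Link startup is now a bounded retry loop: a failed DME_LINKSTARTUP is only trustworthy after the local UniPro stack has been reset via ufshcd_hba_enable(), so the code resets and retries up to DME_LINKSTARTUP_RETRIES times. The control flow in isolation (attempt() and reset() are placeholders, not driver functions):

    #include <stdio.h>

    #define LINKSTARTUP_RETRIES 3	/* mirrors DME_LINKSTARTUP_RETRIES */

    static int attempt(int try) { return try < 2 ? -1 : 0; }  /* fails twice */
    static int reset(void)      { puts("reset local UniPro");  return 0; }

    int main(void)
    {
    	int retries = LINKSTARTUP_RETRIES, try = 0, ret;

    	do {
    		ret = attempt(try++);
    		/* On failure, reset the local stack before the next try;
    		 * if the reset itself fails, give up immediately. */
    		if (ret && reset())
    			break;
    	} while (ret && retries--);

    	printf("link startup %s\n", ret ? "failed" : "succeeded");
    	return ret ? 1 : 0;
    }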
@@ -1955,6 +2641,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
	int err = 0;
	int retries;
 
+	ufshcd_hold(hba, false);
	mutex_lock(&hba->dev_cmd.lock);
	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
@@ -1966,6 +2653,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}
	mutex_unlock(&hba->dev_cmd.lock);
+	ufshcd_release(hba);
 
	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
@@ -1973,6 +2661,100 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
 }
 
 /**
+ * ufshcd_set_queue_depth - set lun queue depth
+ * @sdev: pointer to SCSI device
+ *
+ * Read bLUQueueDepth value and activate scsi tagged command
+ * queueing. For WLUN, queue depth is set to 1. For best-effort
+ * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
+ * value that host can queue.
+ */
+static void ufshcd_set_queue_depth(struct scsi_device *sdev)
+{
+	int ret = 0;
+	u8 lun_qdepth;
+	struct ufs_hba *hba;
+
+	hba = shost_priv(sdev->host);
+
+	lun_qdepth = hba->nutrs;
+	ret = ufshcd_read_unit_desc_param(hba,
+					  ufshcd_scsi_to_upiu_lun(sdev->lun),
+					  UNIT_DESC_PARAM_LU_Q_DEPTH,
+					  &lun_qdepth,
+					  sizeof(lun_qdepth));
+
+	/* Some W-LUNs don't support unit descriptors */
+	if (ret == -EOPNOTSUPP)
+		lun_qdepth = 1;
+	else if (!lun_qdepth)
+		/* eventually, we can figure out the real queue depth */
+		lun_qdepth = hba->nutrs;
+	else
+		lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
+
+	dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
+		__func__, lun_qdepth);
+	scsi_activate_tcq(sdev, lun_qdepth);
+}
+
+/*
+ * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
+ * @hba: per-adapter instance
+ * @lun: UFS device lun id
+ * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
+ *
+ * Returns 0 in case of success; the write protect status is returned in the
+ * @b_lu_write_protect parameter.
+ * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
+ * Returns -EINVAL in case of invalid parameters passed to this function.
+ */
+static int ufshcd_get_lu_wp(struct ufs_hba *hba,
+			    u8 lun,
+			    u8 *b_lu_write_protect)
+{
+	int ret;
+
+	if (!b_lu_write_protect)
+		ret = -EINVAL;
+	/*
+	 * According to UFS device spec, RPMB LU can't be write
+	 * protected so skip reading bLUWriteProtect parameter for
+	 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
+	 */
+	else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
+		ret = -ENOTSUPP;
+	else
+		ret = ufshcd_read_unit_desc_param(hba,
+						  lun,
+						  UNIT_DESC_PARAM_LU_WR_PROTECT,
+						  b_lu_write_protect,
+						  sizeof(*b_lu_write_protect));
+	return ret;
+}
+
+/**
+ * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
+ * status
+ * @hba: per-adapter instance
+ * @sdev: pointer to SCSI device
+ *
+ */
+static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
+						    struct scsi_device *sdev)
+{
+	if (hba->dev_info.f_power_on_wp_en &&
+	    !hba->dev_info.is_lu_power_on_wp) {
+		u8 b_lu_write_protect;
+
+		if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
+				      &b_lu_write_protect) &&
+		    (b_lu_write_protect == UFS_LU_POWER_ON_WP))
+			hba->dev_info.is_lu_power_on_wp = true;
+	}
+}
+
+/**
  * ufshcd_slave_alloc - handle initial SCSI device configurations
  * @sdev: pointer to SCSI device
  *
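ufshcd_set_queue_depth() collapses three cases into one depth: no unit descriptor (most W-LUNs) gives a depth of 1, bLUQueueDepth == 0 means best effort and takes the host maximum, and anything else is clamped to the host's nutrs. The clamping in isolation (nutrs fixed at 32, matching UFSHCD_CAN_QUEUE; -95 stands in for -EOPNOTSUPP):

    #include <stdio.h>

    #define NUTRS 32	/* host queue slots, cf. UFSHCD_CAN_QUEUE */

    /* ret mimics ufshcd_read_unit_desc_param(): -95 (EOPNOTSUPP) for W-LUNs. */
    static int effective_qdepth(int ret, unsigned char blu_queue_depth)
    {
    	if (ret == -95)
    		return 1;		/* no unit descriptor: serialize */
    	if (!blu_queue_depth)
    		return NUTRS;		/* best effort: take host maximum */
    	return blu_queue_depth < NUTRS ? blu_queue_depth : NUTRS;
    }

    int main(void)
    {
    	printf("%d %d %d %d\n",
    	       effective_qdepth(-95, 0),	/* 1  */
    	       effective_qdepth(0, 0),		/* 32 */
    	       effective_qdepth(0, 8),		/* 8  */
    	       effective_qdepth(0, 200));	/* 32 */
    	return 0;
    }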
@@ -1981,7 +2763,6 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
 static int ufshcd_slave_alloc(struct scsi_device *sdev)
 {
	struct ufs_hba *hba;
-	int lun_qdepth;
 
	hba = shost_priv(sdev->host);
	sdev->tagged_supported = 1;
@@ -1996,16 +2777,10 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) | |||
1996 | /* REPORT SUPPORTED OPERATION CODES is not supported */ | 2777 | /* REPORT SUPPORTED OPERATION CODES is not supported */ |
1997 | sdev->no_report_opcodes = 1; | 2778 | sdev->no_report_opcodes = 1; |
1998 | 2779 | ||
1999 | lun_qdepth = ufshcd_read_sdev_qdepth(hba, sdev); | ||
2000 | if (lun_qdepth <= 0) | ||
2001 | /* eventually, we can figure out the real queue depth */ | ||
2002 | lun_qdepth = hba->nutrs; | ||
2003 | else | ||
2004 | lun_qdepth = min_t(int, lun_qdepth, hba->nutrs); | ||
2005 | 2780 | ||
2006 | dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n", | 2781 | ufshcd_set_queue_depth(sdev); |
2007 | __func__, lun_qdepth); | 2782 | |
2008 | scsi_activate_tcq(sdev, lun_qdepth); | 2783 | ufshcd_get_lu_power_on_wp_status(hba, sdev); |
2009 | 2784 | ||
2010 | return 0; | 2785 | return 0; |
2011 | } | 2786 | } |
@@ -2068,6 +2843,9 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev) | |||
2068 | 2843 | ||
2069 | hba = shost_priv(sdev->host); | 2844 | hba = shost_priv(sdev->host); |
2070 | scsi_deactivate_tcq(sdev, hba->nutrs); | 2845 | scsi_deactivate_tcq(sdev, hba->nutrs); |
2846 | /* Drop the reference as it won't be needed anymore */ | ||
2847 | if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) | ||
2848 | hba->sdev_ufs_device = NULL; | ||
2071 | } | 2849 | } |
2072 | 2850 | ||
2073 | /** | 2851 | /** |
@@ -2234,8 +3012,8 @@ static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) | |||
2234 | complete(&hba->active_uic_cmd->done); | 3012 | complete(&hba->active_uic_cmd->done); |
2235 | } | 3013 | } |
2236 | 3014 | ||
2237 | if ((intr_status & UIC_POWER_MODE) && hba->pwr_done) | 3015 | if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) |
2238 | complete(hba->pwr_done); | 3016 | complete(hba->uic_async_done); |
2239 | } | 3017 | } |
2240 | 3018 | ||
2241 | /** | 3019 | /** |
@@ -2275,6 +3053,7 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba) | |||
2275 | clear_bit_unlock(index, &hba->lrb_in_use); | 3053 | clear_bit_unlock(index, &hba->lrb_in_use); |
2276 | /* Do not touch lrbp after scsi done */ | 3054 | /* Do not touch lrbp after scsi done */ |
2277 | cmd->scsi_done(cmd); | 3055 | cmd->scsi_done(cmd); |
3056 | __ufshcd_release(hba); | ||
2278 | } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) { | 3057 | } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) { |
2279 | if (hba->dev_cmd.complete) | 3058 | if (hba->dev_cmd.complete) |
2280 | complete(hba->dev_cmd.complete); | 3059 | complete(hba->dev_cmd.complete); |
@@ -2284,6 +3063,8 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba) | |||
2284 | /* clear corresponding bits of completed commands */ | 3063 | /* clear corresponding bits of completed commands */ |
2285 | hba->outstanding_reqs ^= completed_reqs; | 3064 | hba->outstanding_reqs ^= completed_reqs; |
2286 | 3065 | ||
3066 | ufshcd_clk_scaling_update_busy(hba); | ||
3067 | |||
2287 | /* we might have free'd some tags above */ | 3068 | /* we might have free'd some tags above */ |
2288 | wake_up(&hba->dev_cmd.tag_wq); | 3069 | wake_up(&hba->dev_cmd.tag_wq); |
2289 | } | 3070 | } |
@@ -2447,33 +3228,62 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) | |||
2447 | } | 3228 | } |
2448 | 3229 | ||
2449 | /** | 3230 | /** |
2450 | * ufshcd_urgent_bkops - handle urgent bkops exception event | 3231 | * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status |
2451 | * @hba: per-adapter instance | 3232 | * @hba: per-adapter instance |
3233 | * @status: bkops_status value | ||
2452 | * | 3234 | * |
2453 | * Enable fBackgroundOpsEn flag in the device to permit background | 3235 | * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn |
2454 | * operations. | 3236 | * flag in the device to permit background operations if the device's |
3237 | * bkops_status is greater than or equal to the "status" argument passed to | ||
3238 | * this function; disable it otherwise. | ||
3239 | * | ||
3240 | * Returns 0 for success, non-zero in case of failure. | ||
3241 | * | ||
3242 | * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag | ||
3243 | * to know whether auto bkops is enabled or disabled after this function | ||
3244 | * returns control to it. | ||
2455 | */ | 3245 | */ |
2456 | static int ufshcd_urgent_bkops(struct ufs_hba *hba) | 3246 | static int ufshcd_bkops_ctrl(struct ufs_hba *hba, |
3247 | enum bkops_status status) | ||
2457 | { | 3248 | { |
2458 | int err; | 3249 | int err; |
2459 | u32 status = 0; | 3250 | u32 curr_status = 0; |
2460 | 3251 | ||
2461 | err = ufshcd_get_bkops_status(hba, &status); | 3252 | err = ufshcd_get_bkops_status(hba, &curr_status); |
2462 | if (err) { | 3253 | if (err) { |
2463 | dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", | 3254 | dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", |
2464 | __func__, err); | 3255 | __func__, err); |
2465 | goto out; | 3256 | goto out; |
3257 | } else if (curr_status > BKOPS_STATUS_MAX) { | ||
3258 | dev_err(hba->dev, "%s: invalid BKOPS status %d\n", | ||
3259 | __func__, curr_status); | ||
3260 | err = -EINVAL; | ||
3261 | goto out; | ||
2466 | } | 3262 | } |
2467 | 3263 | ||
2468 | status = status & 0xF; | 3264 | if (curr_status >= status) |
2469 | |||
2470 | /* handle only if status indicates performance impact or critical */ | ||
2471 | if (status >= BKOPS_STATUS_PERF_IMPACT) | ||
2472 | err = ufshcd_enable_auto_bkops(hba); | 3265 | err = ufshcd_enable_auto_bkops(hba); |
3266 | else | ||
3267 | err = ufshcd_disable_auto_bkops(hba); | ||
2473 | out: | 3268 | out: |
2474 | return err; | 3269 | return err; |
2475 | } | 3270 | } |
2476 | 3271 | ||
3272 | /** | ||
3273 | * ufshcd_urgent_bkops - handle urgent bkops exception event | ||
3274 | * @hba: per-adapter instance | ||
3275 | * | ||
3276 | * Enable fBackgroundOpsEn flag in the device to permit background | ||
3277 | * operations. | ||
3278 | * | ||
3279 | * If BKOPS is enabled, this function returns 0; returns 1 if BKOPS is not | ||
3280 | * enabled; and returns a negative error value for any other failure. | ||
3281 | */ | ||
3282 | static int ufshcd_urgent_bkops(struct ufs_hba *hba) | ||
3283 | { | ||
3284 | return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT); | ||
3285 | } | ||
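
The comparison "curr_status >= status" is the whole policy: ufshcd_bkops_ctrl() enables auto-bkops when the device reports at least the requested urgency and disables it otherwise, and ufshcd_urgent_bkops() is simply that check instantiated with BKOPS_STATUS_PERF_IMPACT. A compilable sketch; the enum values below follow the usual ordering of the bkops_status attribute but are assumed here, the real definitions live in ufs.h:

#include <stdio.h>
#include <stdbool.h>

enum bkops_status {
    BKOPS_STATUS_NO_OP,
    BKOPS_STATUS_NON_CRITICAL,
    BKOPS_STATUS_PERF_IMPACT,
    BKOPS_STATUS_CRITICAL,
    BKOPS_STATUS_MAX = BKOPS_STATUS_CRITICAL
};

/* enable auto-bkops iff the device reports at least the requested urgency */
static bool want_auto_bkops(enum bkops_status curr, enum bkops_status thresh)
{
    return curr >= thresh;
}

int main(void)
{
    /* ufshcd_urgent_bkops() passes BKOPS_STATUS_PERF_IMPACT as thresh */
    printf("%d %d\n",
           want_auto_bkops(BKOPS_STATUS_NON_CRITICAL, BKOPS_STATUS_PERF_IMPACT), /* 0 */
           want_auto_bkops(BKOPS_STATUS_CRITICAL, BKOPS_STATUS_PERF_IMPACT));    /* 1 */
    return 0;
}
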
3286 | |||
2477 | static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) | 3287 | static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) |
2478 | { | 3288 | { |
2479 | return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, | 3289 | return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, |
@@ -2505,7 +3315,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work) | |||
2505 | status &= hba->ee_ctrl_mask; | 3315 | status &= hba->ee_ctrl_mask; |
2506 | if (status & MASK_EE_URGENT_BKOPS) { | 3316 | if (status & MASK_EE_URGENT_BKOPS) { |
2507 | err = ufshcd_urgent_bkops(hba); | 3317 | err = ufshcd_urgent_bkops(hba); |
2508 | if (err) | 3318 | if (err < 0) |
2509 | dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", | 3319 | dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", |
2510 | __func__, err); | 3320 | __func__, err); |
2511 | } | 3321 | } |
@@ -2530,6 +3340,7 @@ static void ufshcd_err_handler(struct work_struct *work) | |||
2530 | hba = container_of(work, struct ufs_hba, eh_work); | 3340 | hba = container_of(work, struct ufs_hba, eh_work); |
2531 | 3341 | ||
2532 | pm_runtime_get_sync(hba->dev); | 3342 | pm_runtime_get_sync(hba->dev); |
3343 | ufshcd_hold(hba, false); | ||
2533 | 3344 | ||
2534 | spin_lock_irqsave(hba->host->host_lock, flags); | 3345 | spin_lock_irqsave(hba->host->host_lock, flags); |
2535 | if (hba->ufshcd_state == UFSHCD_STATE_RESET) { | 3346 | if (hba->ufshcd_state == UFSHCD_STATE_RESET) { |
@@ -2583,6 +3394,7 @@ static void ufshcd_err_handler(struct work_struct *work) | |||
2583 | 3394 | ||
2584 | out: | 3395 | out: |
2585 | scsi_unblock_requests(hba->host); | 3396 | scsi_unblock_requests(hba->host); |
3397 | ufshcd_release(hba); | ||
2586 | pm_runtime_put_sync(hba->dev); | 3398 | pm_runtime_put_sync(hba->dev); |
2587 | } | 3399 | } |
2588 | 3400 | ||
@@ -2766,6 +3578,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, | |||
2766 | * the maximum wait time is bounded by %TM_CMD_TIMEOUT. | 3578 | * the maximum wait time is bounded by %TM_CMD_TIMEOUT. |
2767 | */ | 3579 | */ |
2768 | wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot)); | 3580 | wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot)); |
3581 | ufshcd_hold(hba, false); | ||
2769 | 3582 | ||
2770 | spin_lock_irqsave(host->host_lock, flags); | 3583 | spin_lock_irqsave(host->host_lock, flags); |
2771 | task_req_descp = hba->utmrdl_base_addr; | 3584 | task_req_descp = hba->utmrdl_base_addr; |
@@ -2785,7 +3598,10 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, | |||
2785 | lun_id, task_tag); | 3598 | lun_id, task_tag); |
2786 | task_req_upiup->header.dword_1 = | 3599 | task_req_upiup->header.dword_1 = |
2787 | UPIU_HEADER_DWORD(0, tm_function, 0, 0); | 3600 | UPIU_HEADER_DWORD(0, tm_function, 0, 0); |
2788 | 3601 | /* | |
3602 | * The host shall provide the same value for the LUN field in the basic | ||
3603 | * header and for Input Parameter 1. | ||
3604 | */ | ||
2789 | task_req_upiup->input_param1 = cpu_to_be32(lun_id); | 3605 | task_req_upiup->input_param1 = cpu_to_be32(lun_id); |
2790 | task_req_upiup->input_param2 = cpu_to_be32(task_id); | 3606 | task_req_upiup->input_param2 = cpu_to_be32(task_id); |
2791 | 3607 | ||
@@ -2814,6 +3630,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, | |||
2814 | ufshcd_put_tm_slot(hba, free_slot); | 3630 | ufshcd_put_tm_slot(hba, free_slot); |
2815 | wake_up(&hba->tm_tag_wq); | 3631 | wake_up(&hba->tm_tag_wq); |
2816 | 3632 | ||
3633 | ufshcd_release(hba); | ||
2817 | return err; | 3634 | return err; |
2818 | } | 3635 | } |
2819 | 3636 | ||
@@ -2896,6 +3713,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) | |||
2896 | hba = shost_priv(host); | 3713 | hba = shost_priv(host); |
2897 | tag = cmd->request->tag; | 3714 | tag = cmd->request->tag; |
2898 | 3715 | ||
3716 | ufshcd_hold(hba, false); | ||
2899 | /* If command is already aborted/completed, return SUCCESS */ | 3717 | /* If command is already aborted/completed, return SUCCESS */ |
2900 | if (!(test_bit(tag, &hba->outstanding_reqs))) | 3718 | if (!(test_bit(tag, &hba->outstanding_reqs))) |
2901 | goto out; | 3719 | goto out; |
@@ -2960,6 +3778,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) | |||
2960 | 3778 | ||
2961 | clear_bit_unlock(tag, &hba->lrb_in_use); | 3779 | clear_bit_unlock(tag, &hba->lrb_in_use); |
2962 | wake_up(&hba->dev_cmd.tag_wq); | 3780 | wake_up(&hba->dev_cmd.tag_wq); |
3781 | |||
2963 | out: | 3782 | out: |
2964 | if (!err) { | 3783 | if (!err) { |
2965 | err = SUCCESS; | 3784 | err = SUCCESS; |
@@ -2968,6 +3787,11 @@ out: | |||
2968 | err = FAILED; | 3787 | err = FAILED; |
2969 | } | 3788 | } |
2970 | 3789 | ||
3790 | /* | ||
3791 | * This ufshcd_release() corresponds to the original scsi cmd that got | ||
3792 | * aborted here (as we won't get any IRQ for it). | ||
3793 | */ | ||
3794 | ufshcd_release(hba); | ||
2971 | return err; | 3795 | return err; |
2972 | } | 3796 | } |
2973 | 3797 | ||
@@ -2984,7 +3808,6 @@ out: | |||
2984 | static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) | 3808 | static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) |
2985 | { | 3809 | { |
2986 | int err; | 3810 | int err; |
2987 | async_cookie_t cookie; | ||
2988 | unsigned long flags; | 3811 | unsigned long flags; |
2989 | 3812 | ||
2990 | /* Reset the host controller */ | 3813 | /* Reset the host controller */ |
@@ -2997,10 +3820,9 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) | |||
2997 | goto out; | 3820 | goto out; |
2998 | 3821 | ||
2999 | /* Establish the link again and restore the device */ | 3822 | /* Establish the link again and restore the device */ |
3000 | cookie = async_schedule(ufshcd_async_scan, hba); | 3823 | err = ufshcd_probe_hba(hba); |
3001 | /* wait for async scan to be completed */ | 3824 | |
3002 | async_synchronize_cookie(++cookie); | 3825 | if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) |
3003 | if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) | ||
3004 | err = -EIO; | 3826 | err = -EIO; |
3005 | out: | 3827 | out: |
3006 | if (err) | 3828 | if (err) |
@@ -3022,8 +3844,11 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba) | |||
3022 | { | 3844 | { |
3023 | int err = 0; | 3845 | int err = 0; |
3024 | unsigned long flags; | 3846 | unsigned long flags; |
3847 | int retries = MAX_HOST_RESET_RETRIES; | ||
3025 | 3848 | ||
3026 | err = ufshcd_host_reset_and_restore(hba); | 3849 | do { |
3850 | err = ufshcd_host_reset_and_restore(hba); | ||
3851 | } while (err && --retries); | ||
3027 | 3852 | ||
3028 | /* | 3853 | /* |
3029 | * After reset the door-bell might be cleared, complete | 3854 | * After reset the door-bell might be cleared, complete |
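
The do/while form above guarantees at least one reset attempt and at most MAX_HOST_RESET_RETRIES, stopping early on the first success. A minimal standalone model (flaky_reset is a made-up stub standing in for ufshcd_host_reset_and_restore()):

#include <stdio.h>

#define MAX_HOST_RESET_RETRIES 5

/* stub that fails twice, then succeeds */
static int flaky_reset(void)
{
    static int calls;
    return ++calls < 3 ? -1 : 0;
}

int main(void)
{
    int retries = MAX_HOST_RESET_RETRIES;
    int err;

    do {
        err = flaky_reset();
    } while (err && --retries);    /* stop on success or when retries reaches 0 */

    printf("err=%d retries_left=%d\n", err, retries);    /* err=0 retries_left=3 */
    return 0;
}
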
@@ -3051,6 +3876,7 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) | |||
3051 | 3876 | ||
3052 | hba = shost_priv(cmd->device->host); | 3877 | hba = shost_priv(cmd->device->host); |
3053 | 3878 | ||
3879 | ufshcd_hold(hba, false); | ||
3054 | /* | 3880 | /* |
3055 | * Check if there is any race with fatal error handling. | 3881 | * Check if there is any race with fatal error handling. |
3056 | * If so, wait for it to complete. Even though fatal error | 3882 | * If so, wait for it to complete. Even though fatal error |
@@ -3084,56 +3910,232 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) | |||
3084 | ufshcd_clear_eh_in_progress(hba); | 3910 | ufshcd_clear_eh_in_progress(hba); |
3085 | spin_unlock_irqrestore(hba->host->host_lock, flags); | 3911 | spin_unlock_irqrestore(hba->host->host_lock, flags); |
3086 | 3912 | ||
3913 | ufshcd_release(hba); | ||
3087 | return err; | 3914 | return err; |
3088 | } | 3915 | } |
3089 | 3916 | ||
3090 | /** | 3917 | /** |
3091 | * ufshcd_read_sdev_qdepth - read the lun command queue depth | 3918 | * ufshcd_get_max_icc_level - calculate the ICC level |
3092 | * @hba: Pointer to adapter instance | 3919 | * @sup_curr_uA: max. current supported by the regulator |
3093 | * @sdev: pointer to SCSI device | 3920 | * @start_scan: row in the desc table to start the scan from |
3921 | * @buff: power descriptor buffer | ||
3094 | * | 3922 | * |
3095 | * Return in case of success the lun's queue depth else error. | 3923 | * Returns calculated max ICC level for specific regulator |
3096 | */ | 3924 | */ |
3097 | static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba, | 3925 | static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff) |
3098 | struct scsi_device *sdev) | 3926 | { |
3927 | int i; | ||
3928 | int curr_uA; | ||
3929 | u16 data; | ||
3930 | u16 unit; | ||
3931 | |||
3932 | for (i = start_scan; i >= 0; i--) { | ||
3933 | data = be16_to_cpu(*((u16 *)(buff + 2*i))); | ||
3934 | unit = (data & ATTR_ICC_LVL_UNIT_MASK) >> | ||
3935 | ATTR_ICC_LVL_UNIT_OFFSET; | ||
3936 | curr_uA = data & ATTR_ICC_LVL_VALUE_MASK; | ||
3937 | switch (unit) { | ||
3938 | case UFSHCD_NANO_AMP: | ||
3939 | curr_uA = curr_uA / 1000; | ||
3940 | break; | ||
3941 | case UFSHCD_MILI_AMP: | ||
3942 | curr_uA = curr_uA * 1000; | ||
3943 | break; | ||
3944 | case UFSHCD_AMP: | ||
3945 | curr_uA = curr_uA * 1000 * 1000; | ||
3946 | break; | ||
3947 | case UFSHCD_MICRO_AMP: | ||
3948 | default: | ||
3949 | break; | ||
3950 | } | ||
3951 | if (sup_curr_uA >= curr_uA) | ||
3952 | break; | ||
3953 | } | ||
3954 | if (i < 0) { | ||
3955 | pr_err("%s: Couldn't find a valid icc_level, defaulting to 0\n", __func__); | ||
3956 | i = 0; | ||
3957 | } | ||
3958 | |||
3959 | return (u32)i; | ||
3960 | } | ||
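
Each level in the power descriptor is a big-endian 16-bit word: the top bits select a unit and the low bits carry the magnitude, and the loop above scans from the highest level downwards for the first entry the regulator can actually supply. A sketch of decoding one entry into microamps; the field widths and the unit encoding order below are assumptions, the real ATTR_ICC_LVL_* masks and UFSHCD_*_AMP values come from the driver headers:

#include <stdio.h>
#include <stdint.h>

#define ICC_UNIT_OFFSET 14
#define ICC_UNIT_MASK   (0x3 << ICC_UNIT_OFFSET)
#define ICC_VALUE_MASK  0x3FF

enum { ICC_NANO_AMP, ICC_MICRO_AMP, ICC_MILI_AMP, ICC_AMP };

/* normalize one descriptor entry to microamps, as the switch above does */
static int icc_entry_to_uA(uint16_t data)
{
    int curr = data & ICC_VALUE_MASK;

    switch ((data & ICC_UNIT_MASK) >> ICC_UNIT_OFFSET) {
    case ICC_NANO_AMP:
        return curr / 1000;
    case ICC_MILI_AMP:
        return curr * 1000;
    case ICC_AMP:
        return curr * 1000 * 1000;
    default:            /* already in microamps */
        return curr;
    }
}

int main(void)
{
    /* unit = mA, value = 300 -> 300000 uA */
    printf("%d uA\n", icc_entry_to_uA((ICC_MILI_AMP << ICC_UNIT_OFFSET) | 300));
    return 0;
}
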
3961 | |||
3962 | /** | ||
3963 | * ufshcd_find_max_sup_active_icc_level - calculate the max supported active ICC level | ||
3964 | * In case the regulators are not initialized we'll return 0 | ||
3965 | * @hba: per-adapter instance | ||
3966 | * @desc_buf: power descriptor buffer to extract ICC levels from. | ||
3967 | * @len: length of desc_buf | ||
3968 | * | ||
3969 | * Returns calculated ICC level | ||
3970 | */ | ||
3971 | static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, | ||
3972 | u8 *desc_buf, int len) | ||
3973 | { | ||
3974 | u32 icc_level = 0; | ||
3975 | |||
3976 | if (!hba->vreg_info.vcc || !hba->vreg_info.vccq || | ||
3977 | !hba->vreg_info.vccq2) { | ||
3978 | dev_err(hba->dev, | ||
3979 | "%s: Regulator capability was not set, actvIccLevel=%d", | ||
3980 | __func__, icc_level); | ||
3981 | goto out; | ||
3982 | } | ||
3983 | |||
3984 | if (hba->vreg_info.vcc) | ||
3985 | icc_level = ufshcd_get_max_icc_level( | ||
3986 | hba->vreg_info.vcc->max_uA, | ||
3987 | POWER_DESC_MAX_ACTV_ICC_LVLS - 1, | ||
3988 | &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]); | ||
3989 | |||
3990 | if (hba->vreg_info.vccq) | ||
3991 | icc_level = ufshcd_get_max_icc_level( | ||
3992 | hba->vreg_info.vccq->max_uA, | ||
3993 | icc_level, | ||
3994 | &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]); | ||
3995 | |||
3996 | if (hba->vreg_info.vccq2) | ||
3997 | icc_level = ufshcd_get_max_icc_level( | ||
3998 | hba->vreg_info.vccq2->max_uA, | ||
3999 | icc_level, | ||
4000 | &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]); | ||
4001 | out: | ||
4002 | return icc_level; | ||
4003 | } | ||
4004 | |||
4005 | static void ufshcd_init_icc_levels(struct ufs_hba *hba) | ||
3099 | { | 4006 | { |
3100 | int ret; | 4007 | int ret; |
3101 | int buff_len = UNIT_DESC_MAX_SIZE; | 4008 | int buff_len = QUERY_DESC_POWER_MAX_SIZE; |
3102 | u8 desc_buf[UNIT_DESC_MAX_SIZE]; | 4009 | u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE]; |
3103 | 4010 | ||
3104 | ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC, | 4011 | ret = ufshcd_read_power_desc(hba, desc_buf, buff_len); |
3105 | QUERY_DESC_IDN_UNIT, sdev->lun, 0, desc_buf, &buff_len); | 4012 | if (ret) { |
4013 | dev_err(hba->dev, | ||
4014 | "%s: Failed reading power descriptor.len = %d ret = %d", | ||
4015 | __func__, buff_len, ret); | ||
4016 | return; | ||
4017 | } | ||
4018 | |||
4019 | hba->init_prefetch_data.icc_level = | ||
4020 | ufshcd_find_max_sup_active_icc_level(hba, | ||
4021 | desc_buf, buff_len); | ||
4022 | dev_dbg(hba->dev, "%s: setting icc_level 0x%x", | ||
4023 | __func__, hba->init_prefetch_data.icc_level); | ||
3106 | 4024 | ||
3107 | if (ret || (buff_len < UNIT_DESC_PARAM_LU_Q_DEPTH)) { | 4025 | ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, |
4026 | QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, | ||
4027 | &hba->init_prefetch_data.icc_level); | ||
4028 | |||
4029 | if (ret) | ||
3108 | dev_err(hba->dev, | 4030 | dev_err(hba->dev, |
3109 | "%s:Failed reading unit descriptor. len = %d ret = %d" | 4031 | "%s: Failed configuring bActiveICCLevel = %d ret = %d", |
3110 | , __func__, buff_len, ret); | 4032 | __func__, hba->init_prefetch_data.icc_level, ret);
3111 | if (!ret) | ||
3112 | ret = -EINVAL; | ||
3113 | 4033 | ||
4034 | } | ||
4035 | |||
4036 | /** | ||
4037 | * ufshcd_scsi_add_wlus - Adds required W-LUs | ||
4038 | * @hba: per-adapter instance | ||
4039 | * | ||
4040 | * UFS device specification requires the UFS devices to support 4 well known | ||
4041 | * logical units: | ||
4042 | * "REPORT_LUNS" (address: 01h) | ||
4043 | * "UFS Device" (address: 50h) | ||
4044 | * "RPMB" (address: 44h) | ||
4045 | * "BOOT" (address: 30h) | ||
4046 | * UFS device's power management needs to be controlled by "POWER CONDITION" | ||
4047 | * field of SSU (START STOP UNIT) command. But this "power condition" field | ||
4048 | * will take effect only when its sent to "UFS device" well known logical unit | ||
4049 | * hence we require the scsi_device instance to represent this logical unit in | ||
4050 | * order for the UFS host driver to send the SSU command for power management. | ||
4051 | * | ||
4052 | * We also require the scsi_device instance for "RPMB" (Replay Protected Memory | ||
4053 | * Block) LU so a user space process can control this LU. User space may also | ||
4054 | * want to have access to BOOT LU. | ||
4055 | * | ||
4056 | * This function adds scsi device instances for all of the well known LUs | ||
4057 | * (except the "REPORT LUNS" LU). | ||
4058 | * | ||
4059 | * Returns zero on success (all required W-LUs are added successfully), | ||
4060 | * non-zero error value on failure (if adding any of the required W-LUs failed). | ||
4061 | */ | ||
4062 | static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) | ||
4063 | { | ||
4064 | int ret = 0; | ||
4065 | |||
4066 | hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0, | ||
4067 | ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL); | ||
4068 | if (IS_ERR(hba->sdev_ufs_device)) { | ||
4069 | ret = PTR_ERR(hba->sdev_ufs_device); | ||
4070 | hba->sdev_ufs_device = NULL; | ||
3114 | goto out; | 4071 | goto out; |
3115 | } | 4072 | } |
3116 | 4073 | ||
3117 | ret = desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH] & 0xFF; | 4074 | hba->sdev_boot = __scsi_add_device(hba->host, 0, 0, |
4075 | ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL); | ||
4076 | if (IS_ERR(hba->sdev_boot)) { | ||
4077 | ret = PTR_ERR(hba->sdev_boot); | ||
4078 | hba->sdev_boot = NULL; | ||
4079 | goto remove_sdev_ufs_device; | ||
4080 | } | ||
4081 | |||
4082 | hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0, | ||
4083 | ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL); | ||
4084 | if (IS_ERR(hba->sdev_rpmb)) { | ||
4085 | ret = PTR_ERR(hba->sdev_rpmb); | ||
4086 | hba->sdev_rpmb = NULL; | ||
4087 | goto remove_sdev_boot; | ||
4088 | } | ||
4089 | goto out; | ||
4090 | |||
4091 | remove_sdev_boot: | ||
4092 | scsi_remove_device(hba->sdev_boot); | ||
4093 | remove_sdev_ufs_device: | ||
4094 | scsi_remove_device(hba->sdev_ufs_device); | ||
3118 | out: | 4095 | out: |
3119 | return ret; | 4096 | return ret; |
3120 | } | 4097 | } |
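
The three __scsi_add_device() calls above address the W-LUs through ufshcd_upiu_wlun_to_scsi_wlun(), which maps a UPIU well-known LUN id into the SAM well-known LUN range. A sketch of that mapping; the two constants are assumptions modeled on the usual encodings (bit 7 flags a W-LUN on the UPIU side, SAM parks well-known LUNs at 0xc100 + id), the real helper lives in ufshcd.h:

#include <stdio.h>
#include <stdint.h>

#define UPIU_WLUN_ID_BIT (1 << 7)
#define SCSI_W_LUN_BASE  0xc100

static uint16_t upiu_wlun_to_scsi_wlun(uint8_t upiu_wlun_id)
{
    return (uint16_t)((upiu_wlun_id & ~UPIU_WLUN_ID_BIT) | SCSI_W_LUN_BASE);
}

int main(void)
{
    /* the three W-LUs added above: UFS Device 50h, BOOT 30h, RPMB 44h */
    printf("%#x %#x %#x\n",
           upiu_wlun_to_scsi_wlun(0x50),    /* 0xc150 */
           upiu_wlun_to_scsi_wlun(0x30),    /* 0xc130 */
           upiu_wlun_to_scsi_wlun(0x44));   /* 0xc144 */
    return 0;
}
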
3121 | 4098 | ||
3122 | /** | 4099 | /** |
3123 | * ufshcd_async_scan - asynchronous execution for link startup | 4100 | * ufshcd_scsi_remove_wlus - Removes the W-LUs which were added by |
3124 | * @data: data pointer to pass to this function | 4101 | * ufshcd_scsi_add_wlus() |
3125 | * @cookie: cookie data | 4102 | * @hba: per-adapter instance |
4103 | * | ||
3126 | */ | 4104 | */ |
3127 | static void ufshcd_async_scan(void *data, async_cookie_t cookie) | 4105 | static void ufshcd_scsi_remove_wlus(struct ufs_hba *hba) |
4106 | { | ||
4107 | if (hba->sdev_ufs_device) { | ||
4108 | scsi_remove_device(hba->sdev_ufs_device); | ||
4109 | hba->sdev_ufs_device = NULL; | ||
4110 | } | ||
4111 | |||
4112 | if (hba->sdev_boot) { | ||
4113 | scsi_remove_device(hba->sdev_boot); | ||
4114 | hba->sdev_boot = NULL; | ||
4115 | } | ||
4116 | |||
4117 | if (hba->sdev_rpmb) { | ||
4118 | scsi_remove_device(hba->sdev_rpmb); | ||
4119 | hba->sdev_rpmb = NULL; | ||
4120 | } | ||
4121 | } | ||
4122 | |||
4123 | /** | ||
4124 | * ufshcd_probe_hba - probe hba to detect device and initialize | ||
4125 | * @hba: per-adapter instance | ||
4126 | * | ||
4127 | * Execute link-startup and verify device initialization | ||
4128 | */ | ||
4129 | static int ufshcd_probe_hba(struct ufs_hba *hba) | ||
3128 | { | 4130 | { |
3129 | struct ufs_hba *hba = (struct ufs_hba *)data; | ||
3130 | int ret; | 4131 | int ret; |
3131 | 4132 | ||
3132 | ret = ufshcd_link_startup(hba); | 4133 | ret = ufshcd_link_startup(hba); |
3133 | if (ret) | 4134 | if (ret) |
3134 | goto out; | 4135 | goto out; |
3135 | 4136 | ||
3136 | ufshcd_config_max_pwr_mode(hba); | 4137 | /* UniPro link is active now */ |
4138 | ufshcd_set_link_active(hba); | ||
3137 | 4139 | ||
3138 | ret = ufshcd_verify_dev_init(hba); | 4140 | ret = ufshcd_verify_dev_init(hba); |
3139 | if (ret) | 4141 | if (ret) |
@@ -3143,16 +4145,77 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie) | |||
3143 | if (ret) | 4145 | if (ret) |
3144 | goto out; | 4146 | goto out; |
3145 | 4147 | ||
4148 | /* UFS device is also active now */ | ||
4149 | ufshcd_set_ufs_dev_active(hba); | ||
3146 | ufshcd_force_reset_auto_bkops(hba); | 4150 | ufshcd_force_reset_auto_bkops(hba); |
3147 | hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; | 4151 | hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; |
4152 | hba->wlun_dev_clr_ua = true; | ||
4153 | |||
4154 | if (ufshcd_get_max_pwr_mode(hba)) { | ||
4155 | dev_err(hba->dev, | ||
4156 | "%s: Failed getting max supported power mode\n", | ||
4157 | __func__); | ||
4158 | } else { | ||
4159 | ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info); | ||
4160 | if (ret) | ||
4161 | dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", | ||
4162 | __func__, ret); | ||
4163 | } | ||
4164 | |||
4165 | /* | ||
4166 | * If we are in error handling context or in power management callbacks | ||
4167 | * context, no need to scan the host | ||
4168 | */ | ||
4169 | if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { | ||
4170 | bool flag; | ||
4171 | |||
4172 | /* clear any previous UFS device information */ | ||
4173 | memset(&hba->dev_info, 0, sizeof(hba->dev_info)); | ||
4174 | if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, | ||
4175 | QUERY_FLAG_IDN_PWR_ON_WPE, &flag)) | ||
4176 | hba->dev_info.f_power_on_wp_en = flag; | ||
4177 | |||
4178 | if (!hba->is_init_prefetch) | ||
4179 | ufshcd_init_icc_levels(hba); | ||
4180 | |||
4181 | /* Add required well known logical units to scsi mid layer */ | ||
4182 | if (ufshcd_scsi_add_wlus(hba)) | ||
4183 | goto out; | ||
3148 | 4184 | ||
3149 | /* If we are in error handling context no need to scan the host */ | ||
3150 | if (!ufshcd_eh_in_progress(hba)) { | ||
3151 | scsi_scan_host(hba->host); | 4185 | scsi_scan_host(hba->host); |
3152 | pm_runtime_put_sync(hba->dev); | 4186 | pm_runtime_put_sync(hba->dev); |
3153 | } | 4187 | } |
4188 | |||
4189 | if (!hba->is_init_prefetch) | ||
4190 | hba->is_init_prefetch = true; | ||
4191 | |||
4192 | /* Resume devfreq after UFS device is detected */ | ||
4193 | if (ufshcd_is_clkscaling_enabled(hba)) | ||
4194 | devfreq_resume_device(hba->devfreq); | ||
4195 | |||
3154 | out: | 4196 | out: |
3155 | return; | 4197 | /* |
4198 | * If we failed to initialize the device or the device is not | ||
4199 | * present, turn off the power/clocks etc. | ||
4200 | */ | ||
4201 | if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { | ||
4202 | pm_runtime_put_sync(hba->dev); | ||
4203 | ufshcd_hba_exit(hba); | ||
4204 | } | ||
4205 | |||
4206 | return ret; | ||
4207 | } | ||
4208 | |||
4209 | /** | ||
4210 | * ufshcd_async_scan - asynchronous execution for probing hba | ||
4211 | * @data: data pointer to pass to this function | ||
4212 | * @cookie: cookie data | ||
4213 | */ | ||
4214 | static void ufshcd_async_scan(void *data, async_cookie_t cookie) | ||
4215 | { | ||
4216 | struct ufs_hba *hba = (struct ufs_hba *)data; | ||
4217 | |||
4218 | ufshcd_probe_hba(hba); | ||
3156 | } | 4219 | } |
3157 | 4220 | ||
3158 | static struct scsi_host_template ufshcd_driver_template = { | 4221 | static struct scsi_host_template ufshcd_driver_template = { |
@@ -3171,70 +4234,956 @@ static struct scsi_host_template ufshcd_driver_template = { | |||
3171 | .sg_tablesize = SG_ALL, | 4234 | .sg_tablesize = SG_ALL, |
3172 | .cmd_per_lun = UFSHCD_CMD_PER_LUN, | 4235 | .cmd_per_lun = UFSHCD_CMD_PER_LUN, |
3173 | .can_queue = UFSHCD_CAN_QUEUE, | 4236 | .can_queue = UFSHCD_CAN_QUEUE, |
4237 | .max_host_blocked = 1, | ||
3174 | }; | 4238 | }; |
3175 | 4239 | ||
4240 | static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, | ||
4241 | int ua) | ||
4242 | { | ||
4243 | int ret = 0; | ||
4244 | struct regulator *reg = vreg->reg; | ||
4245 | const char *name = vreg->name; | ||
4246 | |||
4247 | BUG_ON(!vreg); | ||
4248 | |||
4249 | ret = regulator_set_optimum_mode(reg, ua); | ||
4250 | if (ret >= 0) { | ||
4251 | /* | ||
4252 | * regulator_set_optimum_mode() returns new regulator | ||
4253 | * mode upon success. | ||
4254 | */ | ||
4255 | ret = 0; | ||
4256 | } else { | ||
4257 | dev_err(dev, "%s: %s set optimum mode(ua=%d) failed, err=%d\n", | ||
4258 | __func__, name, ua, ret); | ||
4259 | } | ||
4260 | |||
4261 | return ret; | ||
4262 | } | ||
4263 | |||
4264 | static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, | ||
4265 | struct ufs_vreg *vreg) | ||
4266 | { | ||
4267 | return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); | ||
4268 | } | ||
4269 | |||
4270 | static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, | ||
4271 | struct ufs_vreg *vreg) | ||
4272 | { | ||
4273 | return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); | ||
4274 | } | ||
4275 | |||
4276 | static int ufshcd_config_vreg(struct device *dev, | ||
4277 | struct ufs_vreg *vreg, bool on) | ||
4278 | { | ||
4279 | int ret = 0; | ||
4280 | struct regulator *reg = vreg->reg; | ||
4281 | const char *name = vreg->name; | ||
4282 | int min_uV, uA_load; | ||
4283 | |||
4284 | BUG_ON(!vreg); | ||
4285 | |||
4286 | if (regulator_count_voltages(reg) > 0) { | ||
4287 | min_uV = on ? vreg->min_uV : 0; | ||
4288 | ret = regulator_set_voltage(reg, min_uV, vreg->max_uV); | ||
4289 | if (ret) { | ||
4290 | dev_err(dev, "%s: %s set voltage failed, err=%d\n", | ||
4291 | __func__, name, ret); | ||
4292 | goto out; | ||
4293 | } | ||
4294 | |||
4295 | uA_load = on ? vreg->max_uA : 0; | ||
4296 | ret = ufshcd_config_vreg_load(dev, vreg, uA_load); | ||
4297 | if (ret) | ||
4298 | goto out; | ||
4299 | } | ||
4300 | out: | ||
4301 | return ret; | ||
4302 | } | ||
4303 | |||
4304 | static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg) | ||
4305 | { | ||
4306 | int ret = 0; | ||
4307 | |||
4308 | if (!vreg || vreg->enabled) | ||
4309 | goto out; | ||
4310 | |||
4311 | ret = ufshcd_config_vreg(dev, vreg, true); | ||
4312 | if (!ret) | ||
4313 | ret = regulator_enable(vreg->reg); | ||
4314 | |||
4315 | if (!ret) | ||
4316 | vreg->enabled = true; | ||
4317 | else | ||
4318 | dev_err(dev, "%s: %s enable failed, err=%d\n", | ||
4319 | __func__, vreg->name, ret); | ||
4320 | out: | ||
4321 | return ret; | ||
4322 | } | ||
4323 | |||
4324 | static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg) | ||
4325 | { | ||
4326 | int ret = 0; | ||
4327 | |||
4328 | if (!vreg || !vreg->enabled) | ||
4329 | goto out; | ||
4330 | |||
4331 | ret = regulator_disable(vreg->reg); | ||
4332 | |||
4333 | if (!ret) { | ||
4334 | /* ignore errors on applying disable config */ | ||
4335 | ufshcd_config_vreg(dev, vreg, false); | ||
4336 | vreg->enabled = false; | ||
4337 | } else { | ||
4338 | dev_err(dev, "%s: %s disable failed, err=%d\n", | ||
4339 | __func__, vreg->name, ret); | ||
4340 | } | ||
4341 | out: | ||
4342 | return ret; | ||
4343 | } | ||
4344 | |||
4345 | static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on) | ||
4346 | { | ||
4347 | int ret = 0; | ||
4348 | struct device *dev = hba->dev; | ||
4349 | struct ufs_vreg_info *info = &hba->vreg_info; | ||
4350 | |||
4351 | if (!info) | ||
4352 | goto out; | ||
4353 | |||
4354 | ret = ufshcd_toggle_vreg(dev, info->vcc, on); | ||
4355 | if (ret) | ||
4356 | goto out; | ||
4357 | |||
4358 | ret = ufshcd_toggle_vreg(dev, info->vccq, on); | ||
4359 | if (ret) | ||
4360 | goto out; | ||
4361 | |||
4362 | ret = ufshcd_toggle_vreg(dev, info->vccq2, on); | ||
4363 | if (ret) | ||
4364 | goto out; | ||
4365 | |||
4366 | out: | ||
4367 | if (ret) { | ||
4368 | ufshcd_toggle_vreg(dev, info->vccq2, false); | ||
4369 | ufshcd_toggle_vreg(dev, info->vccq, false); | ||
4370 | ufshcd_toggle_vreg(dev, info->vcc, false); | ||
4371 | } | ||
4372 | return ret; | ||
4373 | } | ||
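
Note the all-or-nothing shape of ufshcd_setup_vreg(): on any enable failure every rail is toggled off, including ones that were never turned on, which is safe because disabling an already-disabled regulator is a no-op in ufshcd_disable_vreg(). A toy model of the pattern (the rails and stubs are invented for the demo):

#include <stdio.h>
#include <stdbool.h>

/* toy regulators standing in for vcc/vccq/vccq2 */
static bool rail_on[3];

static int toggle_rail(int idx, bool on, bool fail)
{
    if (on && fail)
        return -1;
    rail_on[idx] = on;
    return 0;
}

/* enable in order; on any failure force all three off, as above */
static int setup_rails(bool on, int failing_idx)
{
    int ret = 0, i;

    for (i = 0; i < 3 && !ret; i++)
        ret = toggle_rail(i, on, on && i == failing_idx);

    if (ret)
        for (i = 2; i >= 0; i--)
            toggle_rail(i, false, false);
    return ret;
}

int main(void)
{
    setup_rails(true, 1);    /* vccq "fails": everything ends up off */
    printf("%d %d %d\n", rail_on[0], rail_on[1], rail_on[2]);    /* 0 0 0 */
    return 0;
}
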
4374 | |||
4375 | static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on) | ||
4376 | { | ||
4377 | struct ufs_vreg_info *info = &hba->vreg_info; | ||
4378 | |||
4379 | if (info) | ||
4380 | return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on); | ||
4381 | |||
4382 | return 0; | ||
4383 | } | ||
4384 | |||
4385 | static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg) | ||
4386 | { | ||
4387 | int ret = 0; | ||
4388 | |||
4389 | if (!vreg) | ||
4390 | goto out; | ||
4391 | |||
4392 | vreg->reg = devm_regulator_get(dev, vreg->name); | ||
4393 | if (IS_ERR(vreg->reg)) { | ||
4394 | ret = PTR_ERR(vreg->reg); | ||
4395 | dev_err(dev, "%s: %s get failed, err=%d\n", | ||
4396 | __func__, vreg->name, ret); | ||
4397 | } | ||
4398 | out: | ||
4399 | return ret; | ||
4400 | } | ||
4401 | |||
4402 | static int ufshcd_init_vreg(struct ufs_hba *hba) | ||
4403 | { | ||
4404 | int ret = 0; | ||
4405 | struct device *dev = hba->dev; | ||
4406 | struct ufs_vreg_info *info = &hba->vreg_info; | ||
4407 | |||
4408 | if (!info) | ||
4409 | goto out; | ||
4410 | |||
4411 | ret = ufshcd_get_vreg(dev, info->vcc); | ||
4412 | if (ret) | ||
4413 | goto out; | ||
4414 | |||
4415 | ret = ufshcd_get_vreg(dev, info->vccq); | ||
4416 | if (ret) | ||
4417 | goto out; | ||
4418 | |||
4419 | ret = ufshcd_get_vreg(dev, info->vccq2); | ||
4420 | out: | ||
4421 | return ret; | ||
4422 | } | ||
4423 | |||
4424 | static int ufshcd_init_hba_vreg(struct ufs_hba *hba) | ||
4425 | { | ||
4426 | struct ufs_vreg_info *info = &hba->vreg_info; | ||
4427 | |||
4428 | if (info) | ||
4429 | return ufshcd_get_vreg(hba->dev, info->vdd_hba); | ||
4430 | |||
4431 | return 0; | ||
4432 | } | ||
4433 | |||
4434 | static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, | ||
4435 | bool skip_ref_clk) | ||
4436 | { | ||
4437 | int ret = 0; | ||
4438 | struct ufs_clk_info *clki; | ||
4439 | struct list_head *head = &hba->clk_list_head; | ||
4440 | unsigned long flags; | ||
4441 | |||
4442 | if (!head || list_empty(head)) | ||
4443 | goto out; | ||
4444 | |||
4445 | list_for_each_entry(clki, head, list) { | ||
4446 | if (!IS_ERR_OR_NULL(clki->clk)) { | ||
4447 | if (skip_ref_clk && !strcmp(clki->name, "ref_clk")) | ||
4448 | continue; | ||
4449 | |||
4450 | if (on && !clki->enabled) { | ||
4451 | ret = clk_prepare_enable(clki->clk); | ||
4452 | if (ret) { | ||
4453 | dev_err(hba->dev, "%s: %s prepare enable failed, %d\n", | ||
4454 | __func__, clki->name, ret); | ||
4455 | goto out; | ||
4456 | } | ||
4457 | } else if (!on && clki->enabled) { | ||
4458 | clk_disable_unprepare(clki->clk); | ||
4459 | } | ||
4460 | clki->enabled = on; | ||
4461 | dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__, | ||
4462 | clki->name, on ? "en" : "dis"); | ||
4463 | } | ||
4464 | } | ||
4465 | |||
4466 | if (hba->vops && hba->vops->setup_clocks) | ||
4467 | ret = hba->vops->setup_clocks(hba, on); | ||
4468 | out: | ||
4469 | if (ret) { | ||
4470 | list_for_each_entry(clki, head, list) { | ||
4471 | if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) | ||
4472 | clk_disable_unprepare(clki->clk); | ||
4473 | } | ||
4474 | } else if (!ret && on) { | ||
4475 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
4476 | hba->clk_gating.state = CLKS_ON; | ||
4477 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
4478 | } | ||
4479 | return ret; | ||
4480 | } | ||
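
The skip_ref_clk parameter exists because a device with an active link keeps consuming the reference clock, so the suspend path gates everything except "ref_clk" while full gating turns it off too. The filter reduces to one predicate; the other clock names below are illustrative, only "ref_clk" is significant:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* mirrors the filter in __ufshcd_setup_clocks() */
static bool skip_clk(const char *name, bool skip_ref_clk)
{
    return skip_ref_clk && !strcmp(name, "ref_clk");
}

int main(void)
{
    const char *clks[] = { "core_clk", "iface_clk", "ref_clk" };
    unsigned i;

    for (i = 0; i < 3; i++)
        printf("%s: %s\n", clks[i],
               skip_clk(clks[i], true) ? "kept on" : "gated");
    return 0;
}
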
4481 | |||
4482 | static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on) | ||
4483 | { | ||
4484 | return __ufshcd_setup_clocks(hba, on, false); | ||
4485 | } | ||
4486 | |||
4487 | static int ufshcd_init_clocks(struct ufs_hba *hba) | ||
4488 | { | ||
4489 | int ret = 0; | ||
4490 | struct ufs_clk_info *clki; | ||
4491 | struct device *dev = hba->dev; | ||
4492 | struct list_head *head = &hba->clk_list_head; | ||
4493 | |||
4494 | if (!head || list_empty(head)) | ||
4495 | goto out; | ||
4496 | |||
4497 | list_for_each_entry(clki, head, list) { | ||
4498 | if (!clki->name) | ||
4499 | continue; | ||
4500 | |||
4501 | clki->clk = devm_clk_get(dev, clki->name); | ||
4502 | if (IS_ERR(clki->clk)) { | ||
4503 | ret = PTR_ERR(clki->clk); | ||
4504 | dev_err(dev, "%s: %s clk get failed, %d\n", | ||
4505 | __func__, clki->name, ret); | ||
4506 | goto out; | ||
4507 | } | ||
4508 | |||
4509 | if (clki->max_freq) { | ||
4510 | ret = clk_set_rate(clki->clk, clki->max_freq); | ||
4511 | if (ret) { | ||
4512 | dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", | ||
4513 | __func__, clki->name, | ||
4514 | clki->max_freq, ret); | ||
4515 | goto out; | ||
4516 | } | ||
4517 | clki->curr_freq = clki->max_freq; | ||
4518 | } | ||
4519 | dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__, | ||
4520 | clki->name, clk_get_rate(clki->clk)); | ||
4521 | } | ||
4522 | out: | ||
4523 | return ret; | ||
4524 | } | ||
4525 | |||
4526 | static int ufshcd_variant_hba_init(struct ufs_hba *hba) | ||
4527 | { | ||
4528 | int err = 0; | ||
4529 | |||
4530 | if (!hba->vops) | ||
4531 | goto out; | ||
4532 | |||
4533 | if (hba->vops->init) { | ||
4534 | err = hba->vops->init(hba); | ||
4535 | if (err) | ||
4536 | goto out; | ||
4537 | } | ||
4538 | |||
4539 | if (hba->vops->setup_regulators) { | ||
4540 | err = hba->vops->setup_regulators(hba, true); | ||
4541 | if (err) | ||
4542 | goto out_exit; | ||
4543 | } | ||
4544 | |||
4545 | goto out; | ||
4546 | |||
4547 | out_exit: | ||
4548 | if (hba->vops->exit) | ||
4549 | hba->vops->exit(hba); | ||
4550 | out: | ||
4551 | if (err) | ||
4552 | dev_err(hba->dev, "%s: variant %s init failed err %d\n", | ||
4553 | __func__, hba->vops ? hba->vops->name : "", err); | ||
4554 | return err; | ||
4555 | } | ||
4556 | |||
4557 | static void ufshcd_variant_hba_exit(struct ufs_hba *hba) | ||
4558 | { | ||
4559 | if (!hba->vops) | ||
4560 | return; | ||
4561 | |||
4562 | if (hba->vops->setup_clocks) | ||
4563 | hba->vops->setup_clocks(hba, false); | ||
4564 | |||
4565 | if (hba->vops->setup_regulators) | ||
4566 | hba->vops->setup_regulators(hba, false); | ||
4567 | |||
4568 | if (hba->vops->exit) | ||
4569 | hba->vops->exit(hba); | ||
4570 | } | ||
4571 | |||
4572 | static int ufshcd_hba_init(struct ufs_hba *hba) | ||
4573 | { | ||
4574 | int err; | ||
4575 | |||
4576 | /* | ||
4577 | * Handle host controller power separately from the UFS device power | ||
4578 | * rails as it helps control the UFS host controller power | ||
4579 | * collapse easily, which is different from UFS device power collapse. | ||
4580 | * Also, enable the host controller power before we go ahead with the rest | ||
4581 | * of the initialization here. | ||
4582 | */ | ||
4583 | err = ufshcd_init_hba_vreg(hba); | ||
4584 | if (err) | ||
4585 | goto out; | ||
4586 | |||
4587 | err = ufshcd_setup_hba_vreg(hba, true); | ||
4588 | if (err) | ||
4589 | goto out; | ||
4590 | |||
4591 | err = ufshcd_init_clocks(hba); | ||
4592 | if (err) | ||
4593 | goto out_disable_hba_vreg; | ||
4594 | |||
4595 | err = ufshcd_setup_clocks(hba, true); | ||
4596 | if (err) | ||
4597 | goto out_disable_hba_vreg; | ||
4598 | |||
4599 | err = ufshcd_init_vreg(hba); | ||
4600 | if (err) | ||
4601 | goto out_disable_clks; | ||
4602 | |||
4603 | err = ufshcd_setup_vreg(hba, true); | ||
4604 | if (err) | ||
4605 | goto out_disable_clks; | ||
4606 | |||
4607 | err = ufshcd_variant_hba_init(hba); | ||
4608 | if (err) | ||
4609 | goto out_disable_vreg; | ||
4610 | |||
4611 | hba->is_powered = true; | ||
4612 | goto out; | ||
4613 | |||
4614 | out_disable_vreg: | ||
4615 | ufshcd_setup_vreg(hba, false); | ||
4616 | out_disable_clks: | ||
4617 | ufshcd_setup_clocks(hba, false); | ||
4618 | out_disable_hba_vreg: | ||
4619 | ufshcd_setup_hba_vreg(hba, false); | ||
4620 | out: | ||
4621 | return err; | ||
4622 | } | ||
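
The function above is a classic goto-unwind ladder: each label undoes exactly the steps that succeeded, in reverse order, so a failure at any rung leaves the controller fully powered down. A skeleton of the shape (the step names and stubs are placeholders, not driver calls):

#include <stdio.h>

static int step_ok(const char *what) { printf("init %s\n", what); return 0; }
static void undo(const char *what)   { printf("undo %s\n", what); }

static int bring_up(int fail_at)
{
    int err;

    err = fail_at == 0 ? -1 : step_ok("hba vreg");
    if (err)
        goto out;
    err = fail_at == 1 ? -1 : step_ok("clocks");
    if (err)
        goto out_disable_hba_vreg;
    err = fail_at == 2 ? -1 : step_ok("device vregs");
    if (err)
        goto out_disable_clks;
    return 0;

out_disable_clks:
    undo("clocks");
out_disable_hba_vreg:
    undo("hba vreg");
out:
    return err;
}

int main(void)
{
    /* fails at the vregs step; unwinds clocks, then the hba vreg */
    return bring_up(2) ? 1 : 0;
}
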
4623 | |||
4624 | static void ufshcd_hba_exit(struct ufs_hba *hba) | ||
4625 | { | ||
4626 | if (hba->is_powered) { | ||
4627 | ufshcd_variant_hba_exit(hba); | ||
4628 | ufshcd_setup_vreg(hba, false); | ||
4629 | ufshcd_setup_clocks(hba, false); | ||
4630 | ufshcd_setup_hba_vreg(hba, false); | ||
4631 | hba->is_powered = false; | ||
4632 | } | ||
4633 | } | ||
4634 | |||
4635 | static int | ||
4636 | ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp) | ||
4637 | { | ||
4638 | unsigned char cmd[6] = {REQUEST_SENSE, | ||
4639 | 0, | ||
4640 | 0, | ||
4641 | 0, | ||
4642 | SCSI_SENSE_BUFFERSIZE, | ||
4643 | 0}; | ||
4644 | char *buffer; | ||
4645 | int ret; | ||
4646 | |||
4647 | buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); | ||
4648 | if (!buffer) { | ||
4649 | ret = -ENOMEM; | ||
4650 | goto out; | ||
4651 | } | ||
4652 | |||
4653 | ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer, | ||
4654 | SCSI_SENSE_BUFFERSIZE, NULL, | ||
4655 | msecs_to_jiffies(1000), 3, NULL, REQ_PM); | ||
4656 | if (ret) | ||
4657 | pr_err("%s: failed with err %d\n", __func__, ret); | ||
4658 | |||
4659 | kfree(buffer); | ||
4660 | out: | ||
4661 | return ret; | ||
4662 | } | ||
4663 | |||
4664 | /** | ||
4665 | * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device | ||
4666 | * power mode | ||
4667 | * @hba: per adapter instance | ||
4668 | * @pwr_mode: device power mode to set | ||
4669 | * | ||
4670 | * Returns 0 if requested power mode is set successfully | ||
4671 | * Returns non-zero if failed to set the requested power mode | ||
4672 | */ | ||
4673 | static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, | ||
4674 | enum ufs_dev_pwr_mode pwr_mode) | ||
4675 | { | ||
4676 | unsigned char cmd[6] = { START_STOP }; | ||
4677 | struct scsi_sense_hdr sshdr; | ||
4678 | struct scsi_device *sdp = hba->sdev_ufs_device; | ||
4679 | int ret; | ||
4680 | |||
4681 | if (!sdp || !scsi_device_online(sdp)) | ||
4682 | return -ENODEV; | ||
4683 | |||
4684 | /* | ||
4685 | * If scsi commands fail, the scsi mid-layer schedules scsi error- | ||
4686 | * handling, which would wait for the host to be resumed. Since we know | ||
4687 | * we are functional while we are here, skip host resume in error | ||
4688 | * handling context. | ||
4689 | */ | ||
4690 | hba->host->eh_noresume = 1; | ||
4691 | if (hba->wlun_dev_clr_ua) { | ||
4692 | ret = ufshcd_send_request_sense(hba, sdp); | ||
4693 | if (ret) | ||
4694 | goto out; | ||
4695 | /* Unit attention condition is cleared now */ | ||
4696 | hba->wlun_dev_clr_ua = false; | ||
4697 | } | ||
4698 | |||
4699 | cmd[4] = pwr_mode << 4; | ||
4700 | |||
4701 | /* | ||
4702 | * This function would generally be called from the power management | ||
4703 | * callbacks, hence set the REQ_PM flag so that it doesn't resume the | ||
4704 | * already suspended children. | ||
4705 | */ | ||
4706 | ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, | ||
4707 | START_STOP_TIMEOUT, 0, NULL, REQ_PM); | ||
4708 | if (ret) { | ||
4709 | sdev_printk(KERN_WARNING, sdp, | ||
4710 | "START_STOP failed for power mode: %d\n", pwr_mode); | ||
4711 | scsi_show_result(ret); | ||
4712 | if (driver_byte(ret) & DRIVER_SENSE) { | ||
4713 | scsi_show_sense_hdr(&sshdr); | ||
4714 | scsi_show_extd_sense(sshdr.asc, sshdr.ascq); | ||
4715 | } | ||
4716 | } | ||
4717 | |||
4718 | if (!ret) | ||
4719 | hba->curr_dev_pwr_mode = pwr_mode; | ||
4720 | out: | ||
4721 | hba->host->eh_noresume = 0; | ||
4722 | return ret; | ||
4723 | } | ||
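
Per the SBC START STOP UNIT definition, the POWER CONDITION field occupies the upper nibble of CDB byte 4, which is exactly what "cmd[4] = pwr_mode << 4" encodes above. A sketch; the numeric power mode values below are assumptions, the real enum ufs_dev_pwr_mode lives in the driver headers:

#include <stdio.h>

enum ufs_dev_pwr_mode {
    UFS_ACTIVE_PWR_MODE    = 1,
    UFS_SLEEP_PWR_MODE     = 2,
    UFS_POWERDOWN_PWR_MODE = 3
};

int main(void)
{
    unsigned char cmd[6] = { 0x1b /* START STOP UNIT */ };

    /* POWER CONDITION in the upper nibble of CDB byte 4 */
    cmd[4] = UFS_SLEEP_PWR_MODE << 4;
    printf("CDB[4] = %#04x\n", cmd[4]);    /* 0x20 */
    return 0;
}
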
4724 | |||
4725 | static int ufshcd_link_state_transition(struct ufs_hba *hba, | ||
4726 | enum uic_link_state req_link_state, | ||
4727 | int check_for_bkops) | ||
4728 | { | ||
4729 | int ret = 0; | ||
4730 | |||
4731 | if (req_link_state == hba->uic_link_state) | ||
4732 | return 0; | ||
4733 | |||
4734 | if (req_link_state == UIC_LINK_HIBERN8_STATE) { | ||
4735 | ret = ufshcd_uic_hibern8_enter(hba); | ||
4736 | if (!ret) | ||
4737 | ufshcd_set_link_hibern8(hba); | ||
4738 | else | ||
4739 | goto out; | ||
4740 | } | ||
4741 | /* | ||
4742 | * If autobkops is enabled, link can't be turned off because | ||
4743 | * turning off the link would also turn off the device. | ||
4744 | */ | ||
4745 | else if ((req_link_state == UIC_LINK_OFF_STATE) && | ||
4746 | (!check_for_bkops || (check_for_bkops && | ||
4747 | !hba->auto_bkops_enabled))) { | ||
4748 | /* | ||
4749 | * Change controller state to "reset state" which | ||
4750 | * should also put the link in off/reset state | ||
4751 | */ | ||
4752 | ufshcd_hba_stop(hba); | ||
4753 | /* | ||
4754 | * TODO: Check if we need any delay to make sure that | ||
4755 | * controller is reset | ||
4756 | */ | ||
4757 | ufshcd_set_link_off(hba); | ||
4758 | } | ||
4759 | |||
4760 | out: | ||
4761 | return ret; | ||
4762 | } | ||
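
The link-off guard above, (!check_for_bkops || (check_for_bkops && !hba->auto_bkops_enabled)), reduces by absorption to a simpler predicate: only a bkops-aware caller with auto-bkops still enabled must keep the link up. Condensed:

#include <stdio.h>
#include <stdbool.h>

/* logically equivalent to the condition in ufshcd_link_state_transition() */
static bool can_turn_link_off(bool check_for_bkops, bool auto_bkops_enabled)
{
    return !check_for_bkops || !auto_bkops_enabled;
}

int main(void)
{
    printf("%d %d %d\n",
           can_turn_link_off(false, true),    /* 1 */
           can_turn_link_off(true, false),    /* 1 */
           can_turn_link_off(true, true));    /* 0: bkops keeps the link up */
    return 0;
}
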
4763 | |||
4764 | static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) | ||
4765 | { | ||
4766 | /* | ||
4767 | * If the UFS device is in UFS_Sleep state, turn off the VCC rail to | ||
4768 | * save some power. | ||
4769 | * | ||
4770 | * If UFS device and link is in OFF state, all power supplies (VCC, | ||
4771 | * VCCQ, VCCQ2) can be turned off if power on write protect is not | ||
4772 | * required. If UFS link is inactive (Hibern8 or OFF state) and device | ||
4773 | * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode. | ||
4774 | * | ||
4775 | * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway | ||
4776 | * in low power state which would save some power. | ||
4777 | */ | ||
4778 | if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && | ||
4779 | !hba->dev_info.is_lu_power_on_wp) { | ||
4780 | ufshcd_setup_vreg(hba, false); | ||
4781 | } else if (!ufshcd_is_ufs_dev_active(hba)) { | ||
4782 | ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); | ||
4783 | if (!ufshcd_is_link_active(hba)) { | ||
4784 | ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); | ||
4785 | ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2); | ||
4786 | } | ||
4787 | } | ||
4788 | } | ||
4789 | |||
4790 | static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) | ||
4791 | { | ||
4792 | int ret = 0; | ||
4793 | |||
4794 | if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) && | ||
4795 | !hba->dev_info.is_lu_power_on_wp) { | ||
4796 | ret = ufshcd_setup_vreg(hba, true); | ||
4797 | } else if (!ufshcd_is_ufs_dev_active(hba)) { | ||
4798 | ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true); | ||
4799 | if (!ret && !ufshcd_is_link_active(hba)) { | ||
4800 | ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); | ||
4801 | if (ret) | ||
4802 | goto vcc_disable; | ||
4803 | ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); | ||
4804 | if (ret) | ||
4805 | goto vccq_lpm; | ||
4806 | } | ||
4807 | } | ||
4808 | goto out; | ||
4809 | |||
4810 | vccq_lpm: | ||
4811 | ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq); | ||
4812 | vcc_disable: | ||
4813 | ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false); | ||
4814 | out: | ||
4815 | return ret; | ||
4816 | } | ||
4817 | |||
4818 | static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba) | ||
4819 | { | ||
4820 | if (ufshcd_is_link_off(hba)) | ||
4821 | ufshcd_setup_hba_vreg(hba, false); | ||
4822 | } | ||
4823 | |||
4824 | static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba) | ||
4825 | { | ||
4826 | if (ufshcd_is_link_off(hba)) | ||
4827 | ufshcd_setup_hba_vreg(hba, true); | ||
4828 | } | ||
4829 | |||
3176 | /** | 4830 | /** |
3177 | * ufshcd_suspend - suspend power management function | 4831 | * ufshcd_suspend - helper function for suspend operations |
3178 | * @hba: per adapter instance | 4832 | * @hba: per adapter instance |
3179 | * @state: power state | 4833 | * @pm_op: desired low power operation type |
4834 | * | ||
4835 | * This function will try to put the UFS device and link into low power | ||
4836 | * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl" | ||
4837 | * (System PM level). | ||
4838 | * | ||
4839 | * If this function is called during shutdown, it will make sure that | ||
4840 | * both UFS device and UFS link is powered off. | ||
3180 | * | 4841 | * |
3181 | * Returns -ENOSYS | 4842 | * NOTE: UFS device & link must be active before we enter in this function. |
4843 | * | ||
4844 | * Returns 0 for success and non-zero for failure | ||
3182 | */ | 4845 | */ |
3183 | int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state) | 4846 | static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) |
3184 | { | 4847 | { |
4848 | int ret = 0; | ||
4849 | enum ufs_pm_level pm_lvl; | ||
4850 | enum ufs_dev_pwr_mode req_dev_pwr_mode; | ||
4851 | enum uic_link_state req_link_state; | ||
4852 | |||
4853 | hba->pm_op_in_progress = 1; | ||
4854 | if (!ufshcd_is_shutdown_pm(pm_op)) { | ||
4855 | pm_lvl = ufshcd_is_runtime_pm(pm_op) ? | ||
4856 | hba->rpm_lvl : hba->spm_lvl; | ||
4857 | req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl); | ||
4858 | req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl); | ||
4859 | } else { | ||
4860 | req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE; | ||
4861 | req_link_state = UIC_LINK_OFF_STATE; | ||
4862 | } | ||
4863 | |||
3185 | /* | 4864 | /* |
3186 | * TODO: | 4865 | * If we can't transition into any of the low power modes |
3187 | * 1. Block SCSI requests from SCSI midlayer | 4866 | * just gate the clocks. |
3188 | * 2. Change the internal driver state to non operational | ||
3189 | * 3. Set UTRLRSR and UTMRLRSR bits to zero | ||
3190 | * 4. Wait until outstanding commands are completed | ||
3191 | * 5. Set HCE to zero to send the UFS host controller to reset state | ||
3192 | */ | 4867 | */ |
4868 | ufshcd_hold(hba, false); | ||
4869 | hba->clk_gating.is_suspended = true; | ||
4870 | |||
4871 | if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE && | ||
4872 | req_link_state == UIC_LINK_ACTIVE_STATE) { | ||
4873 | goto disable_clks; | ||
4874 | } | ||
4875 | |||
4876 | if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) && | ||
4877 | (req_link_state == hba->uic_link_state)) | ||
4878 | goto out; | ||
4879 | |||
4880 | /* UFS device & link must be active before we enter in this function */ | ||
4881 | if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { | ||
4882 | ret = -EINVAL; | ||
4883 | goto out; | ||
4884 | } | ||
3193 | 4885 | ||
3194 | return -ENOSYS; | 4886 | if (ufshcd_is_runtime_pm(pm_op)) { |
4887 | if (ufshcd_can_autobkops_during_suspend(hba)) { | ||
4888 | /* | ||
4889 | * The device is idle with no requests in the queue, | ||
4890 | * so allow background operations if the bkops status shows | ||
4891 | * that performance might be impacted. | ||
4892 | */ | ||
4893 | ret = ufshcd_urgent_bkops(hba); | ||
4894 | if (ret) | ||
4895 | goto enable_gating; | ||
4896 | } else { | ||
4897 | /* make sure that auto bkops is disabled */ | ||
4898 | ufshcd_disable_auto_bkops(hba); | ||
4899 | } | ||
4900 | } | ||
4901 | |||
4902 | if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) && | ||
4903 | ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) || | ||
4904 | !ufshcd_is_runtime_pm(pm_op))) { | ||
4905 | /* ensure that bkops is disabled */ | ||
4906 | ufshcd_disable_auto_bkops(hba); | ||
4907 | ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode); | ||
4908 | if (ret) | ||
4909 | goto enable_gating; | ||
4910 | } | ||
4911 | |||
4912 | ret = ufshcd_link_state_transition(hba, req_link_state, 1); | ||
4913 | if (ret) | ||
4914 | goto set_dev_active; | ||
4915 | |||
4916 | ufshcd_vreg_set_lpm(hba); | ||
4917 | |||
4918 | disable_clks: | ||
4919 | /* | ||
4920 | * The clock scaling needs access to controller registers. Hence, wait | ||
4921 | * for pending clock scaling work to be done before clocks are | ||
4922 | * turned off. | ||
4923 | */ | ||
4924 | if (ufshcd_is_clkscaling_enabled(hba)) { | ||
4925 | devfreq_suspend_device(hba->devfreq); | ||
4926 | hba->clk_scaling.window_start_t = 0; | ||
4927 | } | ||
4928 | /* | ||
4929 | * Call vendor specific suspend callback. As these callbacks may access | ||
4930 | * vendor specific host controller register space, call them while the | ||
4931 | * host clocks are still ON. | ||
4932 | */ | ||
4933 | if (hba->vops && hba->vops->suspend) { | ||
4934 | ret = hba->vops->suspend(hba, pm_op); | ||
4935 | if (ret) | ||
4936 | goto set_link_active; | ||
4937 | } | ||
4938 | |||
4939 | if (hba->vops && hba->vops->setup_clocks) { | ||
4940 | ret = hba->vops->setup_clocks(hba, false); | ||
4941 | if (ret) | ||
4942 | goto vops_resume; | ||
4943 | } | ||
4944 | |||
4945 | if (!ufshcd_is_link_active(hba)) | ||
4946 | ufshcd_setup_clocks(hba, false); | ||
4947 | else | ||
4948 | /* If link is active, device ref_clk can't be switched off */ | ||
4949 | __ufshcd_setup_clocks(hba, false, true); | ||
4950 | |||
4951 | hba->clk_gating.state = CLKS_OFF; | ||
4952 | /* | ||
4953 | * Disable the host irq as there won't be any host controller | ||
4954 | * transaction expected till resume. | ||
4955 | */ | ||
4956 | ufshcd_disable_irq(hba); | ||
4957 | /* Put the host controller in low power mode if possible */ | ||
4958 | ufshcd_hba_vreg_set_lpm(hba); | ||
4959 | goto out; | ||
4960 | |||
4961 | vops_resume: | ||
4962 | if (hba->vops && hba->vops->resume) | ||
4963 | hba->vops->resume(hba, pm_op); | ||
4964 | set_link_active: | ||
4965 | ufshcd_vreg_set_hpm(hba); | ||
4966 | if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) | ||
4967 | ufshcd_set_link_active(hba); | ||
4968 | else if (ufshcd_is_link_off(hba)) | ||
4969 | ufshcd_host_reset_and_restore(hba); | ||
4970 | set_dev_active: | ||
4971 | if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE)) | ||
4972 | ufshcd_disable_auto_bkops(hba); | ||
4973 | enable_gating: | ||
4974 | hba->clk_gating.is_suspended = false; | ||
4975 | ufshcd_release(hba); | ||
4976 | out: | ||
4977 | hba->pm_op_in_progress = 0; | ||
4978 | return ret; | ||
3195 | } | 4979 | } |
3196 | EXPORT_SYMBOL_GPL(ufshcd_suspend); | ||
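
Both rpm_lvl and spm_lvl above act as indexes into a table that pairs a target device power mode with a target link state, which ufs_get_pm_lvl_to_dev_pwr_mode() and ufs_get_pm_lvl_to_link_pwr_state() read out. A hypothetical table with that shape; the entries and names below are invented to show the lookup, the real table lives in the driver and may differ:

#include <stdio.h>

enum dev_pwr { DEV_ACTIVE, DEV_SLEEP, DEV_POWERDOWN };
enum link_st { LINK_ACTIVE, LINK_HIBERN8, LINK_OFF };

static const struct { enum dev_pwr dev; enum link_st link; } pm_lvl[] = {
    { DEV_ACTIVE,    LINK_ACTIVE  },    /* lvl 0: nothing to do    */
    { DEV_ACTIVE,    LINK_HIBERN8 },    /* lvl 1: link-only saving */
    { DEV_SLEEP,     LINK_HIBERN8 },    /* lvl 2                   */
    { DEV_POWERDOWN, LINK_OFF     },    /* deepest: shutdown-like  */
};

int main(void)
{
    unsigned rpm_lvl = 2;    /* stands in for hba->rpm_lvl */

    printf("dev=%d link=%d\n", pm_lvl[rpm_lvl].dev, pm_lvl[rpm_lvl].link);
    return 0;
}
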
3197 | 4980 | ||
3198 | /** | 4981 | /** |
3199 | * ufshcd_resume - resume power management function | 4982 | * ufshcd_resume - helper function for resume operations |
3200 | * @hba: per adapter instance | 4983 | * @hba: per adapter instance |
4984 | * @pm_op: runtime PM or system PM | ||
4985 | * | ||
4986 | * This function basically brings the UFS device, UniPro link and controller | ||
4987 | * to active state. | ||
3201 | * | 4988 | * |
3202 | * Returns -ENOSYS | 4989 | * Returns 0 for success and non-zero for failure |
3203 | */ | 4990 | */ |
3204 | int ufshcd_resume(struct ufs_hba *hba) | 4991 | static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) |
3205 | { | 4992 | { |
4993 | int ret; | ||
4994 | enum uic_link_state old_link_state; | ||
4995 | |||
4996 | hba->pm_op_in_progress = 1; | ||
4997 | old_link_state = hba->uic_link_state; | ||
4998 | |||
4999 | ufshcd_hba_vreg_set_hpm(hba); | ||
5000 | /* Make sure clocks are enabled before accessing controller */ | ||
5001 | ret = ufshcd_setup_clocks(hba, true); | ||
5002 | if (ret) | ||
5003 | goto out; | ||
5004 | |||
5005 | /* enable the host irq as the host controller would be active soon */ | ||
5006 | ret = ufshcd_enable_irq(hba); | ||
5007 | if (ret) | ||
5008 | goto disable_irq_and_vops_clks; | ||
5009 | |||
5010 | ret = ufshcd_vreg_set_hpm(hba); | ||
5011 | if (ret) | ||
5012 | goto disable_irq_and_vops_clks; | ||
5013 | |||
5014 | /* | ||
5015 | * Call vendor specific resume callback. As these callbacks may access | ||
5016 | * vendor specific host controller register space call them when the | ||
5017 | * host clocks are ON. | ||
5018 | */ | ||
5019 | if (hba->vops && hba->vops->resume) { | ||
5020 | ret = hba->vops->resume(hba, pm_op); | ||
5021 | if (ret) | ||
5022 | goto disable_vreg; | ||
5023 | } | ||
5024 | |||
5025 | if (ufshcd_is_link_hibern8(hba)) { | ||
5026 | ret = ufshcd_uic_hibern8_exit(hba); | ||
5027 | if (!ret) | ||
5028 | ufshcd_set_link_active(hba); | ||
5029 | else | ||
5030 | goto vendor_suspend; | ||
5031 | } else if (ufshcd_is_link_off(hba)) { | ||
5032 | ret = ufshcd_host_reset_and_restore(hba); | ||
5033 | /* | ||
5034 | * ufshcd_host_reset_and_restore() should have already | ||
5035 | * set the link state as active | ||
5036 | */ | ||
5037 | if (ret || !ufshcd_is_link_active(hba)) | ||
5038 | goto vendor_suspend; | ||
5039 | } | ||
5040 | |||
5041 | if (!ufshcd_is_ufs_dev_active(hba)) { | ||
5042 | ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE); | ||
5043 | if (ret) | ||
5044 | goto set_old_link_state; | ||
5045 | } | ||
5046 | |||
3206 | /* | 5047 | /* |
3207 | * TODO: | 5048 | * If BKOPs operations are urgently needed at this moment then |
3208 | * 1. Set HCE to 1, to start the UFS host controller | 5049 | * keep auto-bkops enabled or else disable it. |
3209 | * initialization process | ||
3210 | * 2. Set UTRLRSR and UTMRLRSR bits to 1 | ||
3211 | * 3. Change the internal driver state to operational | ||
3212 | * 4. Unblock SCSI requests from SCSI midlayer | ||
3213 | */ | 5050 | */ |
5051 | ufshcd_urgent_bkops(hba); | ||
5052 | hba->clk_gating.is_suspended = false; | ||
5053 | |||
5054 | if (ufshcd_is_clkscaling_enabled(hba)) | ||
5055 | devfreq_resume_device(hba->devfreq); | ||
5056 | |||
5057 | /* Schedule clock gating in case of no access to UFS device yet */ | ||
5058 | ufshcd_release(hba); | ||
5059 | goto out; | ||
5060 | |||
5061 | set_old_link_state: | ||
5062 | ufshcd_link_state_transition(hba, old_link_state, 0); | ||
5063 | vendor_suspend: | ||
5064 | if (hba->vops && hba->vops->suspend) | ||
5065 | hba->vops->suspend(hba, pm_op); | ||
5066 | disable_vreg: | ||
5067 | ufshcd_vreg_set_lpm(hba); | ||
5068 | disable_irq_and_vops_clks: | ||
5069 | ufshcd_disable_irq(hba); | ||
5070 | ufshcd_setup_clocks(hba, false); | ||
5071 | out: | ||
5072 | hba->pm_op_in_progress = 0; | ||
5073 | return ret; | ||
5074 | } | ||
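
For context, the vendor hooks called in the suspend/resume paths above are supplied by the glue driver through hba->vops. A minimal sketch of such a pair, matching the (hba, pm_op) signature used above and assuming the variant-ops structure from ufshcd.h; the acme_* names are hypothetical stand-ins, not part of this patch:

	static int acme_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
	{
		/* Host clocks are still on here, so vendor register space is accessible */
		return 0;
	}

	static int acme_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
	{
		/* Runs after ufshcd_setup_clocks(hba, true) but before the link is resumed */
		return 0;
	}

	static const struct ufs_hba_variant_ops acme_ufs_vops = {
		.suspend = acme_ufs_suspend,
		.resume  = acme_ufs_resume,
	};
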
5075 | |||
5076 | /** | ||
5077 | * ufshcd_system_suspend - system suspend routine | ||
5078 | * @hba: per adapter instance | ||
5079 | * @pm_op: runtime PM or system PM | ||
5080 | * | ||
5081 | * Check the description of ufshcd_suspend() for more details. | ||
5082 | * | ||
5083 | * Returns 0 for success and non-zero for failure | ||
5084 | */ | ||
5085 | int ufshcd_system_suspend(struct ufs_hba *hba) | ||
5086 | { | ||
5087 | int ret = 0; | ||
5088 | |||
5089 | if (!hba || !hba->is_powered) | ||
5090 | goto out; | ||
5091 | |||
5092 | if (pm_runtime_suspended(hba->dev)) { | ||
5093 | if (hba->rpm_lvl == hba->spm_lvl) | ||
5094 | /* | ||
5095 | * There is a possibility that the device may still be | ||
5096 | * in an active state during runtime suspend. | ||
5097 | */ | ||
5098 | if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) == | ||
5099 | hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled) | ||
5100 | goto out; | ||
5101 | |||
5102 | /* | ||
5103 | * UFS device and/or UFS link low power states during runtime | ||
5104 | * suspend may differ from what is expected during | ||
5105 | * system suspend. Hence runtime resume the device & link and | ||
5106 | * let the system suspend low power states to take effect. | ||
5107 | * TODO: If resume takes too long, we might optimize it in the | ||
5108 | * future by not resuming everything if possible. | ||
5109 | */ | ||
5110 | ret = ufshcd_runtime_resume(hba); | ||
5111 | if (ret) | ||
5112 | goto out; | ||
5113 | } | ||
5114 | |||
5115 | ret = ufshcd_suspend(hba, UFS_SYSTEM_PM); | ||
5116 | out: | ||
5117 | if (!ret) | ||
5118 | hba->is_sys_suspended = true; | ||
5119 | return ret; | ||
5120 | } | ||
5121 | EXPORT_SYMBOL(ufshcd_system_suspend); | ||
5122 | |||
5123 | /** | ||
5124 | * ufshcd_system_resume - system resume routine | ||
5125 | * @hba: per adapter instance | ||
5126 | * | ||
5127 | * Returns 0 for success and non-zero for failure | ||
5128 | */ | ||
5129 | |||
5130 | int ufshcd_system_resume(struct ufs_hba *hba) | ||
5131 | { | ||
5132 | if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev)) | ||
5133 | /* | ||
5134 | * Let the runtime resume take care of resuming | ||
5135 | * if runtime suspended. | ||
5136 | */ | ||
5137 | return 0; | ||
3214 | 5138 | ||
3215 | return -ENOSYS; | 5139 | return ufshcd_resume(hba, UFS_SYSTEM_PM); |
3216 | } | 5140 | } |
3217 | EXPORT_SYMBOL_GPL(ufshcd_resume); | 5141 | EXPORT_SYMBOL(ufshcd_system_resume); |
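
These exported helpers are meant to be forwarded from a glue driver's system-sleep dev_pm_ops. A sketch, assuming the glue driver stored the hba pointer in drvdata (the acme_* names are illustrative):

	static int acme_ufs_sys_suspend(struct device *dev)
	{
		return ufshcd_system_suspend(dev_get_drvdata(dev));
	}

	static int acme_ufs_sys_resume(struct device *dev)
	{
		return ufshcd_system_resume(dev_get_drvdata(dev));
	}

	static const struct dev_pm_ops acme_ufs_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(acme_ufs_sys_suspend, acme_ufs_sys_resume)
	};
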
3218 | 5142 | ||
5143 | /** | ||
5144 | * ufshcd_runtime_suspend - runtime suspend routine | ||
5145 | * @hba: per adapter instance | ||
5146 | * | ||
5147 | * Check the description of ufshcd_suspend() for more details. | ||
5148 | * | ||
5149 | * Returns 0 for success and non-zero for failure | ||
5150 | */ | ||
3219 | int ufshcd_runtime_suspend(struct ufs_hba *hba) | 5151 | int ufshcd_runtime_suspend(struct ufs_hba *hba) |
3220 | { | 5152 | { |
3221 | if (!hba) | 5153 | if (!hba || !hba->is_powered) |
3222 | return 0; | 5154 | return 0; |
3223 | 5155 | ||
3224 | /* | 5156 | return ufshcd_suspend(hba, UFS_RUNTIME_PM); |
3225 | * The device is idle with no requests in the queue, | ||
3226 | * allow background operations. | ||
3227 | */ | ||
3228 | return ufshcd_enable_auto_bkops(hba); | ||
3229 | } | 5157 | } |
3230 | EXPORT_SYMBOL(ufshcd_runtime_suspend); | 5158 | EXPORT_SYMBOL(ufshcd_runtime_suspend); |
3231 | 5159 | ||
5160 | /** | ||
5161 | * ufshcd_runtime_resume - runtime resume routine | ||
5162 | * @hba: per adapter instance | ||
5163 | * | ||
5164 | * This function basically brings the UFS device, UniPro link and controller | ||
5165 | * to active state. The following operations are done in this function: | ||
5166 | * | ||
5167 | * 1. Turn on all the controller related clocks | ||
5168 | * 2. Bring the UniPro link out of Hibernate state | ||
5169 | * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device | ||
5170 | * to active state. | ||
5171 | * 4. If auto-bkops is enabled on the device, disable it. | ||
5172 | * | ||
5173 | * So the following is the possible power state after this function returns | ||
5174 | * successfully: | ||
5175 | * S1: UFS device in Active state with VCC rail ON | ||
5176 | * UniPro link in Active state | ||
5177 | * All the UFS/UniPro controller clocks are ON | ||
5178 | * | ||
5179 | * Returns 0 for success and non-zero for failure | ||
5180 | */ | ||
3232 | int ufshcd_runtime_resume(struct ufs_hba *hba) | 5181 | int ufshcd_runtime_resume(struct ufs_hba *hba) |
3233 | { | 5182 | { |
3234 | if (!hba) | 5183 | if (!hba || !hba->is_powered) |
3235 | return 0; | 5184 | return 0; |
3236 | 5185 | else | |
3237 | return ufshcd_disable_auto_bkops(hba); | 5186 | return ufshcd_resume(hba, UFS_RUNTIME_PM); |
3238 | } | 5187 | } |
3239 | EXPORT_SYMBOL(ufshcd_runtime_resume); | 5188 | EXPORT_SYMBOL(ufshcd_runtime_resume); |
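
The runtime variants are wired up the same way, normally in the same dev_pm_ops table as the system-sleep handlers sketched earlier; runtime PM then invokes them around periods of bus activity. Again an illustrative sketch:

	static int acme_ufs_runtime_suspend(struct device *dev)
	{
		return ufshcd_runtime_suspend(dev_get_drvdata(dev));
	}

	static int acme_ufs_runtime_resume(struct device *dev)
	{
		return ufshcd_runtime_resume(dev_get_drvdata(dev));
	}

	/* idle callback omitted; ufshcd_runtime_idle() could be forwarded likewise */
	static const struct dev_pm_ops acme_ufs_rpm_ops = {
		SET_RUNTIME_PM_OPS(acme_ufs_runtime_suspend,
				   acme_ufs_runtime_resume, NULL)
	};
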
3240 | 5189 | ||
@@ -3245,6 +5194,36 @@ int ufshcd_runtime_idle(struct ufs_hba *hba) | |||
3245 | EXPORT_SYMBOL(ufshcd_runtime_idle); | 5194 | EXPORT_SYMBOL(ufshcd_runtime_idle); |
3246 | 5195 | ||
3247 | /** | 5196 | /** |
5197 | * ufshcd_shutdown - shutdown routine | ||
5198 | * @hba: per adapter instance | ||
5199 | * | ||
5200 | * This function would power off both UFS device and UFS link. | ||
5201 | * | ||
5202 | * Returns 0 always to allow force shutdown even in case of errors. | ||
5203 | */ | ||
5204 | int ufshcd_shutdown(struct ufs_hba *hba) | ||
5205 | { | ||
5206 | int ret = 0; | ||
5207 | |||
5208 | if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) | ||
5209 | goto out; | ||
5210 | |||
5211 | if (pm_runtime_suspended(hba->dev)) { | ||
5212 | ret = ufshcd_runtime_resume(hba); | ||
5213 | if (ret) | ||
5214 | goto out; | ||
5215 | } | ||
5216 | |||
5217 | ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM); | ||
5218 | out: | ||
5219 | if (ret) | ||
5220 | dev_err(hba->dev, "%s failed, err %d\n", __func__, ret); | ||
5221 | /* allow force shutdown even in case of errors */ | ||
5222 | return 0; | ||
5223 | } | ||
5224 | EXPORT_SYMBOL(ufshcd_shutdown); | ||
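
ufshcd_shutdown() is intended for the bus-level shutdown callback, e.g. of a platform device (illustrative sketch; the return value can be ignored since the function always returns 0 to allow forced shutdown):

	static void acme_ufs_shutdown(struct platform_device *pdev)
	{
		ufshcd_shutdown(platform_get_drvdata(pdev));
	}
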
5225 | |||
5226 | /** | ||
3248 | * ufshcd_remove - de-allocate SCSI host and host memory space | 5227 | * ufshcd_remove - de-allocate SCSI host and host memory space |
3249 | * data structure memory | 5228 | * data structure memory |
3250 | * @hba - per adapter instance | 5229 | * @hba - per adapter instance |
@@ -3252,11 +5231,17 @@ EXPORT_SYMBOL(ufshcd_runtime_idle); | |||
3252 | void ufshcd_remove(struct ufs_hba *hba) | 5231 | void ufshcd_remove(struct ufs_hba *hba) |
3253 | { | 5232 | { |
3254 | scsi_remove_host(hba->host); | 5233 | scsi_remove_host(hba->host); |
5234 | ufshcd_scsi_remove_wlus(hba); | ||
3255 | /* disable interrupts */ | 5235 | /* disable interrupts */ |
3256 | ufshcd_disable_intr(hba, hba->intr_mask); | 5236 | ufshcd_disable_intr(hba, hba->intr_mask); |
3257 | ufshcd_hba_stop(hba); | 5237 | ufshcd_hba_stop(hba); |
3258 | 5238 | ||
3259 | scsi_host_put(hba->host); | 5239 | scsi_host_put(hba->host); |
5240 | |||
5241 | ufshcd_exit_clk_gating(hba); | ||
5242 | if (ufshcd_is_clkscaling_enabled(hba)) | ||
5243 | devfreq_remove_device(hba->devfreq); | ||
5244 | ufshcd_hba_exit(hba); | ||
3260 | } | 5245 | } |
3261 | EXPORT_SYMBOL_GPL(ufshcd_remove); | 5246 | EXPORT_SYMBOL_GPL(ufshcd_remove); |
3262 | 5247 | ||
@@ -3277,19 +5262,16 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba) | |||
3277 | } | 5262 | } |
3278 | 5263 | ||
3279 | /** | 5264 | /** |
3280 | * ufshcd_init - Driver initialization routine | 5265 | * ufshcd_alloc_host - allocate Host Bus Adapter (HBA) |
3281 | * @dev: pointer to device handle | 5266 | * @dev: pointer to device handle |
3282 | * @hba_handle: driver private handle | 5267 | * @hba_handle: driver private handle |
3283 | * @mmio_base: base register address | ||
3284 | * @irq: Interrupt line of device | ||
3285 | * Returns 0 on success, non-zero value on failure | 5268 | * Returns 0 on success, non-zero value on failure |
3286 | */ | 5269 | */ |
3287 | int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, | 5270 | int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) |
3288 | void __iomem *mmio_base, unsigned int irq) | ||
3289 | { | 5271 | { |
3290 | struct Scsi_Host *host; | 5272 | struct Scsi_Host *host; |
3291 | struct ufs_hba *hba; | 5273 | struct ufs_hba *hba; |
3292 | int err; | 5274 | int err = 0; |
3293 | 5275 | ||
3294 | if (!dev) { | 5276 | if (!dev) { |
3295 | dev_err(dev, | 5277 | dev_err(dev, |
@@ -3298,13 +5280,6 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, | |||
3298 | goto out_error; | 5280 | goto out_error; |
3299 | } | 5281 | } |
3300 | 5282 | ||
3301 | if (!mmio_base) { | ||
3302 | dev_err(dev, | ||
3303 | "Invalid memory reference for mmio_base is NULL\n"); | ||
3304 | err = -ENODEV; | ||
3305 | goto out_error; | ||
3306 | } | ||
3307 | |||
3308 | host = scsi_host_alloc(&ufshcd_driver_template, | 5283 | host = scsi_host_alloc(&ufshcd_driver_template, |
3309 | sizeof(struct ufs_hba)); | 5284 | sizeof(struct ufs_hba)); |
3310 | if (!host) { | 5285 | if (!host) { |
@@ -3315,9 +5290,146 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, | |||
3315 | hba = shost_priv(host); | 5290 | hba = shost_priv(host); |
3316 | hba->host = host; | 5291 | hba->host = host; |
3317 | hba->dev = dev; | 5292 | hba->dev = dev; |
5293 | *hba_handle = hba; | ||
5294 | |||
5295 | out_error: | ||
5296 | return err; | ||
5297 | } | ||
5298 | EXPORT_SYMBOL(ufshcd_alloc_host); | ||
5299 | |||
5300 | static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) | ||
5301 | { | ||
5302 | int ret = 0; | ||
5303 | struct ufs_clk_info *clki; | ||
5304 | struct list_head *head = &hba->clk_list_head; | ||
5305 | |||
5306 | if (!head || list_empty(head)) | ||
5307 | goto out; | ||
5308 | |||
5309 | list_for_each_entry(clki, head, list) { | ||
5310 | if (!IS_ERR_OR_NULL(clki->clk)) { | ||
5311 | if (scale_up && clki->max_freq) { | ||
5312 | if (clki->curr_freq == clki->max_freq) | ||
5313 | continue; | ||
5314 | ret = clk_set_rate(clki->clk, clki->max_freq); | ||
5315 | if (ret) { | ||
5316 | dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", | ||
5317 | __func__, clki->name, | ||
5318 | clki->max_freq, ret); | ||
5319 | break; | ||
5320 | } | ||
5321 | clki->curr_freq = clki->max_freq; | ||
5322 | |||
5323 | } else if (!scale_up && clki->min_freq) { | ||
5324 | if (clki->curr_freq == clki->min_freq) | ||
5325 | continue; | ||
5326 | ret = clk_set_rate(clki->clk, clki->min_freq); | ||
5327 | if (ret) { | ||
5328 | dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n", | ||
5329 | __func__, clki->name, | ||
5330 | clki->min_freq, ret); | ||
5331 | break; | ||
5332 | } | ||
5333 | clki->curr_freq = clki->min_freq; | ||
5334 | } | ||
5335 | } | ||
5336 | dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__, | ||
5337 | clki->name, clk_get_rate(clki->clk)); | ||
5338 | } | ||
5339 | if (hba->vops && hba->vops->clk_scale_notify) | ||
5340 | hba->vops->clk_scale_notify(hba); | ||
5341 | out: | ||
5342 | return ret; | ||
5343 | } | ||
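
ufshcd_scale_clks() only touches clocks that the glue driver queued on hba->clk_list_head with both min_freq and max_freq populated; entries missing either bound are skipped. A hypothetical sketch of how one such entry might be set up (the helper, clock name and rates are made up):

	static int acme_ufs_add_scalable_clk(struct ufs_hba *hba, struct device *dev)
	{
		struct ufs_clk_info *clki;

		clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
		if (!clki)
			return -ENOMEM;

		clki->name = "core_clk";	/* made-up clock name */
		clki->min_freq = 19200000;	/* Hz, used when scaling down */
		clki->max_freq = 200000000;	/* Hz, used when scaling up */
		clki->clk = devm_clk_get(dev, clki->name);
		if (IS_ERR(clki->clk))
			return PTR_ERR(clki->clk);

		list_add_tail(&clki->list, &hba->clk_list_head);
		return 0;
	}
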
5344 | |||
5345 | static int ufshcd_devfreq_target(struct device *dev, | ||
5346 | unsigned long *freq, u32 flags) | ||
5347 | { | ||
5348 | int err = 0; | ||
5349 | struct ufs_hba *hba = dev_get_drvdata(dev); | ||
5350 | |||
5351 | if (!ufshcd_is_clkscaling_enabled(hba)) | ||
5352 | return -EINVAL; | ||
5353 | |||
5354 | if (*freq == UINT_MAX) | ||
5355 | err = ufshcd_scale_clks(hba, true); | ||
5356 | else if (*freq == 0) | ||
5357 | err = ufshcd_scale_clks(hba, false); | ||
5358 | |||
5359 | return err; | ||
5360 | } | ||
5361 | |||
5362 | static int ufshcd_devfreq_get_dev_status(struct device *dev, | ||
5363 | struct devfreq_dev_status *stat) | ||
5364 | { | ||
5365 | struct ufs_hba *hba = dev_get_drvdata(dev); | ||
5366 | struct ufs_clk_scaling *scaling = &hba->clk_scaling; | ||
5367 | unsigned long flags; | ||
5368 | |||
5369 | if (!ufshcd_is_clkscaling_enabled(hba)) | ||
5370 | return -EINVAL; | ||
5371 | |||
5372 | memset(stat, 0, sizeof(*stat)); | ||
5373 | |||
5374 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
5375 | if (!scaling->window_start_t) | ||
5376 | goto start_window; | ||
5377 | |||
5378 | if (scaling->is_busy_started) | ||
5379 | scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), | ||
5380 | scaling->busy_start_t)); | ||
5381 | |||
5382 | stat->total_time = jiffies_to_usecs((long)jiffies - | ||
5383 | (long)scaling->window_start_t); | ||
5384 | stat->busy_time = scaling->tot_busy_t; | ||
5385 | start_window: | ||
5386 | scaling->window_start_t = jiffies; | ||
5387 | scaling->tot_busy_t = 0; | ||
5388 | |||
5389 | if (hba->outstanding_reqs) { | ||
5390 | scaling->busy_start_t = ktime_get(); | ||
5391 | scaling->is_busy_started = true; | ||
5392 | } else { | ||
5393 | scaling->busy_start_t = ktime_set(0, 0); | ||
5394 | scaling->is_busy_started = false; | ||
5395 | } | ||
5396 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
5397 | return 0; | ||
5398 | } | ||
5399 | |||
5400 | static struct devfreq_dev_profile ufs_devfreq_profile = { | ||
5401 | .polling_ms = 100, | ||
5402 | .target = ufshcd_devfreq_target, | ||
5403 | .get_dev_status = ufshcd_devfreq_get_dev_status, | ||
5404 | }; | ||
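
Because the profile registers no freq_table, devfreq's simple_ondemand governor effectively only asks for the extremes, which is why ufshcd_devfreq_target() above checks for exactly UINT_MAX and 0. A worked example, assuming the governor's default thresholds:

	/*
	 * With polling_ms = 100, devfreq samples get_dev_status() every
	 * 100 ms.  Suppose one window reports busy_time = 95000 us out of
	 * total_time = 100000 us: the 95% load exceeds simple_ondemand's
	 * default upthreshold of 90%, so the governor requests the maximum
	 * frequency, which reaches ufshcd_devfreq_target() as
	 * *freq == UINT_MAX and scales the clocks up.  A completely idle
	 * window (busy_time == 0) yields a request of 0 and the clocks
	 * drop back to each clki->min_freq.
	 */
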
5405 | |||
5406 | /** | ||
5407 | * ufshcd_init - Driver initialization routine | ||
5408 | * @hba: per-adapter instance | ||
5409 | * @mmio_base: base register address | ||
5410 | * @irq: Interrupt line of device | ||
5411 | * Returns 0 on success, non-zero value on failure | ||
5412 | */ | ||
5413 | int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) | ||
5414 | { | ||
5415 | int err; | ||
5416 | struct Scsi_Host *host = hba->host; | ||
5417 | struct device *dev = hba->dev; | ||
5418 | |||
5419 | if (!mmio_base) { | ||
5420 | dev_err(hba->dev, | ||
5421 | "Invalid memory reference for mmio_base is NULL\n"); | ||
5422 | err = -ENODEV; | ||
5423 | goto out_error; | ||
5424 | } | ||
5425 | |||
3318 | hba->mmio_base = mmio_base; | 5426 | hba->mmio_base = mmio_base; |
3319 | hba->irq = irq; | 5427 | hba->irq = irq; |
3320 | 5428 | ||
5429 | err = ufshcd_hba_init(hba); | ||
5430 | if (err) | ||
5431 | goto out_error; | ||
5432 | |||
3321 | /* Read capabilities registers */ | 5433 | /* Read capabilities registers */ |
3322 | ufshcd_hba_capabilities(hba); | 5434 | ufshcd_hba_capabilities(hba); |
3323 | 5435 | ||
@@ -3346,11 +5458,13 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, | |||
3346 | host->can_queue = hba->nutrs; | 5458 | host->can_queue = hba->nutrs; |
3347 | host->cmd_per_lun = hba->nutrs; | 5459 | host->cmd_per_lun = hba->nutrs; |
3348 | host->max_id = UFSHCD_MAX_ID; | 5460 | host->max_id = UFSHCD_MAX_ID; |
3349 | host->max_lun = UFSHCD_MAX_LUNS; | 5461 | host->max_lun = UFS_MAX_LUNS; |
3350 | host->max_channel = UFSHCD_MAX_CHANNEL; | 5462 | host->max_channel = UFSHCD_MAX_CHANNEL; |
3351 | host->unique_id = host->host_no; | 5463 | host->unique_id = host->host_no; |
3352 | host->max_cmd_len = MAX_CDB_SIZE; | 5464 | host->max_cmd_len = MAX_CDB_SIZE; |
3353 | 5465 | ||
5466 | hba->max_pwr_info.is_valid = false; | ||
5467 | |||
3354 | /* Initialize wait queue for task management */ | 5468 | /* Initialize wait queue for task management */ |
3355 | init_waitqueue_head(&hba->tm_wq); | 5469 | init_waitqueue_head(&hba->tm_wq); |
3356 | init_waitqueue_head(&hba->tm_tag_wq); | 5470 | init_waitqueue_head(&hba->tm_tag_wq); |
@@ -3368,24 +5482,27 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, | |||
3368 | /* Initialize device management tag acquire wait queue */ | 5482 | /* Initialize device management tag acquire wait queue */ |
3369 | init_waitqueue_head(&hba->dev_cmd.tag_wq); | 5483 | init_waitqueue_head(&hba->dev_cmd.tag_wq); |
3370 | 5484 | ||
5485 | ufshcd_init_clk_gating(hba); | ||
3371 | /* IRQ registration */ | 5486 | /* IRQ registration */ |
3372 | err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); | 5487 | err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); |
3373 | if (err) { | 5488 | if (err) { |
3374 | dev_err(hba->dev, "request irq failed\n"); | 5489 | dev_err(hba->dev, "request irq failed\n"); |
3375 | goto out_disable; | 5490 | goto exit_gating; |
5491 | } else { | ||
5492 | hba->is_irq_enabled = true; | ||
3376 | } | 5493 | } |
3377 | 5494 | ||
3378 | /* Enable SCSI tag mapping */ | 5495 | /* Enable SCSI tag mapping */ |
3379 | err = scsi_init_shared_tag_map(host, host->can_queue); | 5496 | err = scsi_init_shared_tag_map(host, host->can_queue); |
3380 | if (err) { | 5497 | if (err) { |
3381 | dev_err(hba->dev, "init shared queue failed\n"); | 5498 | dev_err(hba->dev, "init shared queue failed\n"); |
3382 | goto out_disable; | 5499 | goto exit_gating; |
3383 | } | 5500 | } |
3384 | 5501 | ||
3385 | err = scsi_add_host(host, hba->dev); | 5502 | err = scsi_add_host(host, hba->dev); |
3386 | if (err) { | 5503 | if (err) { |
3387 | dev_err(hba->dev, "scsi_add_host failed\n"); | 5504 | dev_err(hba->dev, "scsi_add_host failed\n"); |
3388 | goto out_disable; | 5505 | goto exit_gating; |
3389 | } | 5506 | } |
3390 | 5507 | ||
3391 | /* Host controller enable */ | 5508 | /* Host controller enable */ |
@@ -3395,19 +5512,40 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, | |||
3395 | goto out_remove_scsi_host; | 5512 | goto out_remove_scsi_host; |
3396 | } | 5513 | } |
3397 | 5514 | ||
3398 | *hba_handle = hba; | 5515 | if (ufshcd_is_clkscaling_enabled(hba)) { |
5516 | hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile, | ||
5517 | "simple_ondemand", NULL); | ||
5518 | if (IS_ERR(hba->devfreq)) { | ||
5519 | dev_err(hba->dev, "Unable to register with devfreq %ld\n", | ||
5520 | PTR_ERR(hba->devfreq)); | ||
5521 | goto out_remove_scsi_host; | ||
5522 | } | ||
5523 | /* Suspend devfreq until the UFS device is detected */ | ||
5524 | devfreq_suspend_device(hba->devfreq); | ||
5525 | hba->clk_scaling.window_start_t = 0; | ||
5526 | } | ||
3399 | 5527 | ||
3400 | /* Hold auto suspend until async scan completes */ | 5528 | /* Hold auto suspend until async scan completes */ |
3401 | pm_runtime_get_sync(dev); | 5529 | pm_runtime_get_sync(dev); |
3402 | 5530 | ||
5531 | /* | ||
5532 | * The device initialization sequence hasn't been invoked yet; | ||
5533 | * set the device to the power-off state. | ||
5534 | */ | ||
5535 | ufshcd_set_ufs_dev_poweroff(hba); | ||
5536 | |||
3403 | async_schedule(ufshcd_async_scan, hba); | 5537 | async_schedule(ufshcd_async_scan, hba); |
3404 | 5538 | ||
3405 | return 0; | 5539 | return 0; |
3406 | 5540 | ||
3407 | out_remove_scsi_host: | 5541 | out_remove_scsi_host: |
3408 | scsi_remove_host(hba->host); | 5542 | scsi_remove_host(hba->host); |
5543 | exit_gating: | ||
5544 | ufshcd_exit_clk_gating(hba); | ||
3409 | out_disable: | 5545 | out_disable: |
5546 | hba->is_irq_enabled = false; | ||
3410 | scsi_host_put(host); | 5547 | scsi_host_put(host); |
5548 | ufshcd_hba_exit(hba); | ||
3411 | out_error: | 5549 | out_error: |
3412 | return err; | 5550 | return err; |
3413 | } | 5551 | } |