author	Yaniv Gardi <ygardi@codeaurora.org>	2015-10-28 07:15:51 -0400
committer	Martin K. Petersen <martin.petersen@oracle.com>	2015-11-09 18:03:55 -0500
commit	f06fcc7155dcbcd9b697d499595a2c1a3945bda2 (patch)
tree	16d733165dce040557335efc98e4c687c98fcddc /drivers/scsi/ufs
parent	6e3fd44d7b7638e0f7e3331eaf7f90f3a629f3e7 (diff)
scsi: ufs-qcom: add QUniPro hardware support and power optimizations
New revisions of the UFS host controller support the new UniPro hardware controller (referred to as QUniPro). This patch adds support for enabling this new UniPro controller hardware. It also adds power optimizations for the bus scaling feature, as well as support for the HS-G3 power mode.

Reviewed-by: Subhash Jadavani <subhashj@codeaurora.org>
Reviewed-by: Gilad Broner <gbroner@codeaurora.org>
Signed-off-by: Yaniv Gardi <ygardi@codeaurora.org>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Diffstat (limited to 'drivers/scsi/ufs')
-rw-r--r--	drivers/scsi/ufs/ufs-qcom.c	| 640
-rw-r--r--	drivers/scsi/ufs/ufs-qcom.h	| 31
-rw-r--r--	drivers/scsi/ufs/ufshcd.c	| 8
-rw-r--r--	drivers/scsi/ufs/ufshcd.h	| 27
4 files changed, 525 insertions(+), 181 deletions(-)
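Before the diff itself, one orientation note: the ufshcd.h/ufshcd.c portion of this patch replaces the PRE_CHANGE/POST_CHANGE macros with an enum ufs_notify_change_status and threads it through the variant ops, so every vendor hook runs once before and once after an operation. Here is a minimal, self-contained sketch of that two-phase pattern; it is not code from the patch, and the *_example names are hypothetical:

#include <stdbool.h>

enum ufs_notify_change_status {
	PRE_CHANGE,
	POST_CHANGE,
};

/* Hypothetical variant hook following the new clk_scale_notify shape. */
static int clk_scale_notify_example(bool scale_up,
				    enum ufs_notify_change_status status)
{
	if (status == PRE_CHANGE) {
		/* prepare for the new rate; a non-zero return aborts */
		return 0;
	}
	/* POST_CHANGE: reprogram timers for the now-active rate */
	return 0;
}

/* Core-side call pattern, as ufshcd_scale_clks() uses after this patch. */
static int scale_clks_example(bool scale_up)
{
	int ret = clk_scale_notify_example(scale_up, PRE_CHANGE);

	if (ret)
		return ret;	/* vendor vetoed the change */
	/* ... actually change the clock rates here ... */
	return clk_scale_notify_example(scale_up, POST_CHANGE);
}

The behavioral consequence shows up in the ufshcd.c hunks below: a PRE_CHANGE failure now vetoes the whole scaling operation before any clock rate is touched.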
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 16338083b0d3..4f38d008bfb4 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -44,11 +44,11 @@ enum {
 
 static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
 
-static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result);
-static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
-		const char *speed_mode);
 static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
+static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
+						       u32 clk_cycles);
+
 static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
 		char *prefix)
 {
@@ -177,6 +177,7 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
 
 	err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
 		&host->tx_l1_sync_clk);
+
 out:
 	return err;
 }
@@ -209,7 +210,9 @@ static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
 
 	do {
 		err = ufshcd_dme_get(hba,
-				UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val);
+				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
+					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
+				&tx_fsm_val);
 		if (err || tx_fsm_val == TX_FSM_HIBERN8)
 			break;
 
@@ -223,7 +226,9 @@ static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
 	 */
 	if (time_after(jiffies, timeout))
 		err = ufshcd_dme_get(hba,
-				UIC_ARG_MIB(MPHY_TX_FSM_STATE), &tx_fsm_val);
+				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
+					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
+				&tx_fsm_val);
 
 	if (err) {
 		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
@@ -237,6 +242,15 @@ static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
 	return err;
 }
 
+static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
+{
+	ufshcd_rmwl(host->hba, QUNIPRO_SEL,
+		   ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
+		   REG_UFS_CFG1);
+	/* make sure above configuration is applied before we return */
+	mb();
+}
+
 static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -251,9 +265,11 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
 	usleep_range(1000, 1100);
 
 	ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
+
 	if (ret) {
-		dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
-			__func__, ret);
+		dev_err(hba->dev,
+			"%s: ufs_qcom_phy_calibrate_phy()failed, ret = %d\n",
+			__func__, ret);
 		goto out;
 	}
 
@@ -274,9 +290,12 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
 
 	ret = ufs_qcom_phy_is_pcs_ready(phy);
 	if (ret)
-		dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
+		dev_err(hba->dev,
+			"%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
 			__func__, ret);
 
+	ufs_qcom_select_unipro_mode(host);
+
 out:
 	return ret;
 }
@@ -299,7 +318,8 @@ static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
 	mb();
 }
 
-static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, bool status)
+static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
+				      enum ufs_notify_change_status status)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	int err = 0;
@@ -329,12 +349,12 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba, bool status)
 }
 
 /**
- * Returns non-zero for success (which rate of core_clk) and 0
- * in case of a failure
+ * Returns zero for success and non-zero in case of a failure
  */
-static unsigned long
-ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate)
+static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
+			       u32 hs, u32 rate, bool update_link_startup_timer)
 {
+	int ret = 0;
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	struct ufs_clk_info *clki;
 	u32 core_clk_period_in_ns;
@@ -352,11 +372,13 @@ ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate)
 	static u32 hs_fr_table_rA[][2] = {
 		{UFS_HS_G1, 0x1F},
 		{UFS_HS_G2, 0x3e},
+		{UFS_HS_G3, 0x7D},
 	};
 
 	static u32 hs_fr_table_rB[][2] = {
 		{UFS_HS_G1, 0x24},
 		{UFS_HS_G2, 0x49},
+		{UFS_HS_G3, 0x92},
 	};
 
 	/*
@@ -384,7 +406,17 @@ ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate)
 		core_clk_rate = DEFAULT_CLK_RATE_HZ;
 
 	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
-	ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
+	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
+		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
+		/*
+		 * make sure above write gets applied before we return from
+		 * this function.
+		 */
+		mb();
+	}
+
+	if (ufs_qcom_cap_qunipro(host))
+		goto out;
 
 	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
 	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
@@ -434,35 +466,59 @@ ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear, u32 hs, u32 rate)
 		goto out_error;
 	}
 
-	/* this register 2 fields shall be written at once */
-	ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
-				REG_UFS_TX_SYMBOL_CLK_NS_US);
+	if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
+	    (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
+		/* this register 2 fields shall be written at once */
+		ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
+			      REG_UFS_TX_SYMBOL_CLK_NS_US);
+		/*
+		 * make sure above write gets applied before we return from
+		 * this function.
+		 */
+		mb();
+	}
+
+	if (update_link_startup_timer) {
+		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
+			      REG_UFS_PA_LINK_STARTUP_TIMER);
+		/*
+		 * make sure that this configuration is applied before
+		 * we return
+		 */
+		mb();
+	}
 	goto out;
 
 out_error:
-	core_clk_rate = 0;
+	ret = -EINVAL;
 out:
-	return core_clk_rate;
+	return ret;
 }
 
-static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, bool status)
+static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
+					enum ufs_notify_change_status status)
 {
-	unsigned long core_clk_rate = 0;
-	u32 core_clk_cycles_per_100ms;
+	int err = 0;
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
 	switch (status) {
 	case PRE_CHANGE:
-		core_clk_rate = ufs_qcom_cfg_timers(hba, UFS_PWM_G1,
-					SLOWAUTO_MODE, 0);
-		if (!core_clk_rate) {
+		if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
+					0, true)) {
 			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
 				__func__);
-			return -EINVAL;
+			err = -EINVAL;
+			goto out;
 		}
-		core_clk_cycles_per_100ms =
-			(core_clk_rate / MSEC_PER_SEC) * 100;
-		ufshcd_writel(hba, core_clk_cycles_per_100ms,
-			      REG_UFS_PA_LINK_STARTUP_TIMER);
+
+		if (ufs_qcom_cap_qunipro(host))
+			/*
+			 * set unipro core clock cycles to 150 & clear clock
+			 * divider
+			 */
+			err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
+									  150);
+
 		break;
 	case POST_CHANGE:
 		ufs_qcom_link_startup_post_change(hba);
@@ -471,7 +527,8 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba, bool status)
 		break;
 	}
 
-	return 0;
+out:
+	return err;
 }
 
 static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
@@ -498,8 +555,10 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	 * If UniPro link is not active, PHY ref_clk, main PHY analog power
 	 * rail and low noise analog power rail for PLL can be switched off.
 	 */
-	if (!ufs_qcom_is_link_active(hba))
+	if (!ufs_qcom_is_link_active(hba)) {
+		ufs_qcom_disable_lane_clks(host);
 		phy_power_off(phy);
+	}
 
 out:
 	return ret;
@@ -518,6 +577,10 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		goto out;
 	}
 
+	err = ufs_qcom_enable_lane_clks(host);
+	if (err)
+		goto out;
+
 	hba->is_sys_suspended = false;
 
 out:
@@ -622,6 +685,81 @@ static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
 	return 0;
 }
 
+#ifdef CONFIG_MSM_BUS_SCALING
+static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
+		const char *speed_mode)
+{
+	struct device *dev = host->hba->dev;
+	struct device_node *np = dev->of_node;
+	int err;
+	const char *key = "qcom,bus-vector-names";
+
+	if (!speed_mode) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
+		err = of_property_match_string(np, key, "MAX");
+	else
+		err = of_property_match_string(np, key, speed_mode);
+
+out:
+	if (err < 0)
+		dev_err(dev, "%s: Invalid %s mode %d\n",
+				__func__, speed_mode, err);
+	return err;
+}
+
+static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
+{
+	int gear = max_t(u32, p->gear_rx, p->gear_tx);
+	int lanes = max_t(u32, p->lane_rx, p->lane_tx);
+	int pwr;
+
+	/* default to PWM Gear 1, Lane 1 if power mode is not initialized */
+	if (!gear)
+		gear = 1;
+
+	if (!lanes)
+		lanes = 1;
+
+	if (!p->pwr_rx && !p->pwr_tx) {
+		pwr = SLOWAUTO_MODE;
+		snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
+	} else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
+		 p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
+		pwr = FAST_MODE;
+		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
+			 p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
+	} else {
+		pwr = SLOW_MODE;
+		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
+			"PWM", gear, lanes);
+	}
+}
+
+static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
+{
+	int err = 0;
+
+	if (vote != host->bus_vote.curr_vote) {
+		err = msm_bus_scale_client_update_request(
+				host->bus_vote.client_handle, vote);
+		if (err) {
+			dev_err(host->hba->dev,
+				"%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
+				__func__, host->bus_vote.client_handle,
+				vote, err);
+			goto out;
+		}
+
+		host->bus_vote.curr_vote = vote;
+	}
+out:
+	return err;
+}
+
 static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
 {
 	int vote;
@@ -643,8 +781,132 @@ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
 	return err;
 }
 
+static ssize_t
+show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			host->bus_vote.is_max_bw_needed);
+}
+
+static ssize_t
+store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	uint32_t value;
+
+	if (!kstrtou32(buf, 0, &value)) {
+		host->bus_vote.is_max_bw_needed = !!value;
+		ufs_qcom_update_bus_bw_vote(host);
+	}
+
+	return count;
+}
+
+static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
+{
+	int err;
+	struct msm_bus_scale_pdata *bus_pdata;
+	struct device *dev = host->hba->dev;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct device_node *np = dev->of_node;
+
+	bus_pdata = msm_bus_cl_get_pdata(pdev);
+	if (!bus_pdata) {
+		dev_err(dev, "%s: failed to get bus vectors\n", __func__);
+		err = -ENODATA;
+		goto out;
+	}
+
+	err = of_property_count_strings(np, "qcom,bus-vector-names");
+	if (err < 0 || err != bus_pdata->num_usecases) {
+		dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
+				__func__, err);
+		goto out;
+	}
+
+	host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
+	if (!host->bus_vote.client_handle) {
+		dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
+				__func__);
+		err = -EFAULT;
+		goto out;
+	}
+
+	/* cache the vote index for minimum and maximum bandwidth */
+	host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
+	host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");
+
+	host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
+	host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
+	sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
+	host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
+	host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
+	err = device_create_file(dev, &host->bus_vote.max_bus_bw);
+out:
+	return err;
+}
+#else /* CONFIG_MSM_BUS_SCALING */
+static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
+{
+	return 0;
+}
+
+static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
+{
+	return 0;
+}
+
+static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
+{
+	return 0;
+}
+#endif /* CONFIG_MSM_BUS_SCALING */
+
+static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
+{
+	if (host->dev_ref_clk_ctrl_mmio &&
+	    (enable ^ host->is_dev_ref_clk_enabled)) {
+		u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);
+
+		if (enable)
+			temp |= host->dev_ref_clk_en_mask;
+		else
+			temp &= ~host->dev_ref_clk_en_mask;
+
+		/*
+		 * If we are here to disable this clock it might be immediately
+		 * after entering into hibern8 in which case we need to make
+		 * sure that device ref_clk is active at least 1us after the
+		 * hibern8 enter.
+		 */
+		if (!enable)
+			udelay(1);
+
+		writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
+
+		/* ensure that ref_clk is enabled/disabled before we return */
+		wmb();
+
+		/*
+		 * If we call hibern8 exit after this, we need to make sure that
+		 * device ref_clk is stable for at least 1us before the hibern8
+		 * exit command.
+		 */
+		if (enable)
+			udelay(1);
+
+		host->is_dev_ref_clk_enabled = enable;
+	}
+}
+
 static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
-				bool status,
+				enum ufs_notify_change_status status,
 				struct ufs_pa_layer_attr *dev_max_params,
 				struct ufs_pa_layer_attr *dev_req_params)
 {
@@ -677,6 +939,20 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 		ufs_qcom_cap.desired_working_mode =
 					UFS_QCOM_LIMIT_DESIRED_MODE;
 
+		if (host->hw_ver.major == 0x1) {
+			/*
+			 * HS-G3 operations may not reliably work on legacy QCOM
+			 * UFS host controller hardware even though capability
+			 * exchange during link startup phase may end up
+			 * negotiating maximum supported gear as G3.
+			 * Hence downgrade the maximum supported gear to HS-G2.
+			 */
+			if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
+				ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
+			if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
+				ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
+		}
+
 		ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
 						 dev_max_params,
 						 dev_req_params);
@@ -688,9 +964,9 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 
 		break;
 	case POST_CHANGE:
-		if (!ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
+		if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
 					dev_req_params->pwr_rx,
-					dev_req_params->hs_rate)) {
+					dev_req_params->hs_rate, false)) {
 			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
 				__func__);
 			/*
@@ -752,10 +1028,11 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
 
 		if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
 			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
+
+		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
 	}
 
 	if (host->hw_ver.major >= 0x2) {
-		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
 		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
 
 		if (!ufs_qcom_cap_qunipro(host))
@@ -770,77 +1047,27 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
-	if (host->hw_ver.major >= 0x2)
-		host->caps = UFS_QCOM_CAP_QUNIPRO;
-}
-
-static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
-		const char *speed_mode)
-{
-	struct device *dev = host->hba->dev;
-	struct device_node *np = dev->of_node;
-	int err;
-	const char *key = "qcom,bus-vector-names";
-
-	if (!speed_mode) {
-		err = -EINVAL;
-		goto out;
-	}
-
-	if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
-		err = of_property_match_string(np, key, "MAX");
-	else
-		err = of_property_match_string(np, key, speed_mode);
-
-out:
-	if (err < 0)
-		dev_err(dev, "%s: Invalid %s mode %d\n",
-				__func__, speed_mode, err);
-	return err;
-}
-
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
-{
-	int err = 0;
-
-	if (vote != host->bus_vote.curr_vote)
-		host->bus_vote.curr_vote = vote;
-
-	return err;
-}
-
-static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
-{
-	int gear = max_t(u32, p->gear_rx, p->gear_tx);
-	int lanes = max_t(u32, p->lane_rx, p->lane_tx);
-	int pwr;
-
-	/* default to PWM Gear 1, Lane 1 if power mode is not initialized */
-	if (!gear)
-		gear = 1;
-
-	if (!lanes)
-		lanes = 1;
+	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+	hba->caps |= UFSHCD_CAP_CLK_SCALING;
+	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
 
-	if (!p->pwr_rx && !p->pwr_tx) {
-		pwr = SLOWAUTO_MODE;
-		snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
-	} else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
-		 p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
-		pwr = FAST_MODE;
-		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
-			 p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
-	} else {
-		pwr = SLOW_MODE;
-		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
-			"PWM", gear, lanes);
+	if (host->hw_ver.major >= 0x2) {
+		host->caps = UFS_QCOM_CAP_QUNIPRO |
+			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
 	}
 }
 
+/**
+ * ufs_qcom_setup_clocks - enables/disable clocks
+ * @hba: host controller instance
+ * @on: If true, enable clocks else disable them.
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
 static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-	int err = 0;
+	int err;
 	int vote = 0;
 
 	/*
@@ -863,20 +1090,18 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
 			ufs_qcom_phy_disable_iface_clk(host->generic_phy);
 			goto out;
 		}
-		/* enable the device ref clock */
-		ufs_qcom_phy_enable_dev_ref_clk(host->generic_phy);
 		vote = host->bus_vote.saved_vote;
 		if (vote == host->bus_vote.min_bw_vote)
 			ufs_qcom_update_bus_bw_vote(host);
+
 	} else {
+
 		/* M-PHY RMMI interface clocks can be turned off */
 		ufs_qcom_phy_disable_iface_clk(host->generic_phy);
-		if (!ufs_qcom_is_link_active(hba)) {
-			/* turn off UFS local PHY ref_clk */
-			ufs_qcom_phy_disable_ref_clk(host->generic_phy);
+		if (!ufs_qcom_is_link_active(hba))
 			/* disable device ref_clk */
-			ufs_qcom_phy_disable_dev_ref_clk(host->generic_phy);
-		}
+			ufs_qcom_dev_ref_clk_ctrl(host, false);
+
 		vote = host->bus_vote.min_bw_vote;
 	}
 
@@ -889,60 +1114,6 @@ out:
 	return err;
 }
 
-static ssize_t
-show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
-			char *buf)
-{
-	struct ufs_hba *hba = dev_get_drvdata(dev);
-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
-	return snprintf(buf, PAGE_SIZE, "%u\n",
-			host->bus_vote.is_max_bw_needed);
-}
-
-static ssize_t
-store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
-		const char *buf, size_t count)
-{
-	struct ufs_hba *hba = dev_get_drvdata(dev);
-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-	uint32_t value;
-
-	if (!kstrtou32(buf, 0, &value)) {
-		host->bus_vote.is_max_bw_needed = !!value;
-		ufs_qcom_update_bus_bw_vote(host);
-	}
-
-	return count;
-}
-
-static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
-{
-	int err;
-	struct device *dev = host->hba->dev;
-	struct device_node *np = dev->of_node;
-
-	err = of_property_count_strings(np, "qcom,bus-vector-names");
-	if (err < 0 ) {
-		dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
-				__func__, err);
-		goto out;
-	}
-
-	/* cache the vote index for minimum and maximum bandwidth */
-	host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
-	host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");
-
-	host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
-	host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
-	sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
-	host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
-	host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
-	err = device_create_file(dev, &host->bus_vote.max_bus_bw);
-out:
-	return err;
-}
-
 #define ANDROID_BOOT_DEV_MAX	30
 static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
 
@@ -969,7 +1140,9 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 {
 	int err;
 	struct device *dev = hba->dev;
+	struct platform_device *pdev = to_platform_device(dev);
 	struct ufs_qcom_host *host;
+	struct resource *res;
 
 	if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
 		return -ENODEV;
@@ -981,9 +1154,15 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 		goto out;
 	}
 
+	/* Make a two way bind between the qcom host and the hba */
 	host->hba = hba;
 	ufshcd_set_variant(hba, host);
 
+	/*
+	 * voting/devoting device ref_clk source is time consuming hence
+	 * skip devoting it during aggressive clock gating. This clock
+	 * will still be gated off during runtime suspend.
+	 */
 	host->generic_phy = devm_phy_get(dev, "ufsphy");
 
 	if (IS_ERR(host->generic_phy)) {
@@ -999,6 +1178,30 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 	ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
 		&host->hw_ver.minor, &host->hw_ver.step);
 
+	/*
+	 * for newer controllers, device reference clock control bit has
+	 * moved inside UFS controller register address space itself.
+	 */
+	if (host->hw_ver.major >= 0x02) {
+		host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
+		host->dev_ref_clk_en_mask = BIT(26);
+	} else {
+		/* "dev_ref_clk_ctrl_mem" is optional resource */
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		if (res) {
+			host->dev_ref_clk_ctrl_mmio =
+					devm_ioremap_resource(dev, res);
+			if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
+				dev_warn(dev,
+					"%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
+					__func__,
+					PTR_ERR(host->dev_ref_clk_ctrl_mmio));
+				host->dev_ref_clk_ctrl_mmio = NULL;
+			}
+			host->dev_ref_clk_en_mask = BIT(5);
+		}
+	}
+
 	/* update phy revision information before calling phy_init() */
 	ufs_qcom_phy_save_controller_version(host->generic_phy,
 		host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
@@ -1015,9 +1218,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 	ufs_qcom_set_caps(hba);
 	ufs_qcom_advertise_quirks(hba);
 
-	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_CLK_SCALING;
-	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
-
 	ufs_qcom_setup_clocks(hba, true);
 
 	if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
@@ -1053,14 +1253,118 @@ static void ufs_qcom_exit(struct ufs_hba *hba)
 	phy_power_off(host->generic_phy);
 }
 
-static
-void ufs_qcom_clk_scale_notify(struct ufs_hba *hba)
+static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
+						       u32 clk_cycles)
+{
+	int err;
+	u32 core_clk_ctrl_reg;
+
+	if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
+		return -EINVAL;
+
+	err = ufshcd_dme_get(hba,
+			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
+			    &core_clk_ctrl_reg);
+	if (err)
+		goto out;
+
+	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
+	core_clk_ctrl_reg |= clk_cycles;
+
+	/* Clear CORE_CLK_DIV_EN */
+	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
+
+	err = ufshcd_dme_set(hba,
+			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
+			    core_clk_ctrl_reg);
+out:
+	return err;
+}
+
+static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
+{
+	/* nothing to do as of now */
+	return 0;
+}
+
+static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	if (!ufs_qcom_cap_qunipro(host))
+		return 0;
+
+	/* set unipro core clock cycles to 150 and clear clock divider */
+	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
+}
+
+static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	int err;
+	u32 core_clk_ctrl_reg;
+
+	if (!ufs_qcom_cap_qunipro(host))
+		return 0;
+
+	err = ufshcd_dme_get(hba,
+			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
+			    &core_clk_ctrl_reg);
+
+	/* make sure CORE_CLK_DIV_EN is cleared */
+	if (!err &&
+	    (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
+		core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
+		err = ufshcd_dme_set(hba,
+				    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
+				    core_clk_ctrl_reg);
+	}
+
+	return err;
+}
+
+static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	if (!ufs_qcom_cap_qunipro(host))
+		return 0;
+
+	/* set unipro core clock cycles to 75 and clear clock divider */
+	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
+}
+
+static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
+		bool scale_up, enum ufs_notify_change_status status)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
+	int err = 0;
 
-	if (!dev_req_params)
-		return;
+	if (status == PRE_CHANGE) {
+		if (scale_up)
+			err = ufs_qcom_clk_scale_up_pre_change(hba);
+		else
+			err = ufs_qcom_clk_scale_down_pre_change(hba);
+	} else {
+		if (scale_up)
+			err = ufs_qcom_clk_scale_up_post_change(hba);
+		else
+			err = ufs_qcom_clk_scale_down_post_change(hba);
+
+		if (err || !dev_req_params)
+			goto out;
+
+		ufs_qcom_cfg_timers(hba,
+				    dev_req_params->gear_rx,
+				    dev_req_params->pwr_rx,
+				    dev_req_params->hs_rate,
+				    false);
+		ufs_qcom_update_bus_bw_vote(host);
+	}
+
+out:
+	return err;
 }
 
 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
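One idiom recurs throughout the reworked ufs_qcom_cfg_timers() above: skip the MMIO write when the register already holds the target value, and when a write is issued, follow it with mb() so the posted write is applied before the function returns. The following is a hedged sketch of that idiom only; ufs_write_if_changed_example() is a hypothetical helper, not anything in the driver:

#include <linux/io.h>
#include <linux/types.h>

/*
 * Sketch of the read-compare-write-barrier idiom applied above to
 * REG_UFS_SYS1CLK_1US and REG_UFS_TX_SYMBOL_CLK_NS_US.
 */
static void ufs_write_if_changed_example(void __iomem *reg, u32 val)
{
	if (readl(reg) == val)
		return;			/* value already programmed */

	writel(val, reg);
	/* make sure the above write is applied before we return */
	mb();
}

Skipping redundant writes matters here because cfg_timers is now invoked on every clock scale and power-mode change, not just at link startup.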
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 1b71a1b0be9f..36249b35f858 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -35,8 +35,8 @@
 
 #define UFS_QCOM_LIMIT_NUM_LANES_RX	2
 #define UFS_QCOM_LIMIT_NUM_LANES_TX	2
-#define UFS_QCOM_LIMIT_HSGEAR_RX	UFS_HS_G2
-#define UFS_QCOM_LIMIT_HSGEAR_TX	UFS_HS_G2
+#define UFS_QCOM_LIMIT_HSGEAR_RX	UFS_HS_G3
+#define UFS_QCOM_LIMIT_HSGEAR_TX	UFS_HS_G3
 #define UFS_QCOM_LIMIT_PWMGEAR_RX	UFS_PWM_G4
 #define UFS_QCOM_LIMIT_PWMGEAR_TX	UFS_PWM_G4
 #define UFS_QCOM_LIMIT_RX_PWR_PWM	SLOW_MODE
@@ -64,6 +64,11 @@ enum {
 	UFS_TEST_BUS_CTRL_2			= 0xF4,
 	UFS_UNIPRO_CFG				= 0xF8,
 
+	/*
+	 * QCOM UFS host controller vendor specific registers
+	 * added in HW Version 3.0.0
+	 */
+	UFS_AH8_CFG				= 0xFC,
 };
 
 /* QCOM UFS host controller vendor specific debug registers */
@@ -83,6 +88,11 @@ enum {
 	UFS_UFS_DBG_RD_EDTL_RAM			= 0x1900,
 };
 
+#define UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(x)	(0x000 + x)
+#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x)	(0x400 + x)
+
+/* bit definitions for REG_UFS_CFG1 register */
+#define QUNIPRO_SEL	UFS_BIT(0)
 #define TEST_BUS_EN		BIT(18)
 #define TEST_BUS_SEL		GENMASK(22, 19)
 
@@ -131,6 +141,12 @@ enum ufs_qcom_phy_init_type {
 	(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_ICE_REGS_EN | \
 	 UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
 
+/* QUniPro Vendor specific attributes */
+#define DME_VS_CORE_CLK_CTRL	0xD002
+/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
+#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT		BIT(8)
+#define DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK	0xFF
+
 static inline void
 ufs_qcom_get_controller_revision(struct ufs_hba *hba,
 				 u8 *major, u16 *minor, u16 *step)
@@ -196,6 +212,12 @@ struct ufs_qcom_host {
 	 * controller supports the QUniPro mode.
 	 */
 	#define UFS_QCOM_CAP_QUNIPRO	UFS_BIT(0)
+
+	/*
+	 * Set this capability if host controller can retain the secure
+	 * configuration even after UFS controller core power collapse.
+	 */
+	#define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE	UFS_BIT(1)
 	u32 caps;
 
 	struct phy *generic_phy;
@@ -208,7 +230,12 @@ struct ufs_qcom_host {
 	struct clk *tx_l1_sync_clk;
 	bool is_lane_clks_enabled;
 
+	void __iomem *dev_ref_clk_ctrl_mmio;
+	bool is_dev_ref_clk_enabled;
 	struct ufs_hw_version hw_ver;
+
+	u32 dev_ref_clk_en_mask;
+
 	/* Bitmask for enabling debug prints */
 	u32 dbg_print_en;
 	struct ufs_qcom_testbus testbus;
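The DME_VS_CORE_CLK_CTRL definitions added above are consumed by ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div() in ufs-qcom.c. Distilled to its core, the helper performs a read-modify-write over the UIC DME interface; the sketch below restates it under a hypothetical name as a reading aid, and is not a second copy living in the driver:

/*
 * Program MAX_CORE_CLK_1US_CYCLES and clear CORE_CLK_DIV_EN in the
 * QUniPro vendor attribute DME_VS_CORE_CLK_CTRL (0xD002).
 */
static int set_core_clk_cycles_example(struct ufs_hba *hba, u32 cycles)
{
	u32 reg;
	int err;

	if (cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
		return -EINVAL;		/* the cycles field is 8 bits wide */

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), &reg);
	if (err)
		return err;

	reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
	reg |= cycles;
	reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;

	return ufshcd_dme_set(hba, UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), reg);
}

The driver passes 150 when scaling up and 75 when scaling down; the field counts UniPro core clock cycles per microsecond, which is why it must track the core clock rate.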
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 52f9dad96fd1..131c72038bf8 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -5420,6 +5420,10 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
 	if (!head || list_empty(head))
 		goto out;
 
+	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+	if (ret)
+		return ret;
+
 	list_for_each_entry(clki, head, list) {
 		if (!IS_ERR_OR_NULL(clki->clk)) {
 			if (scale_up && clki->max_freq) {
@@ -5450,7 +5454,9 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
 			dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
 					clki->name, clk_get_rate(clki->clk));
 	}
-	ufshcd_vops_clk_scale_notify(hba);
+
+	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+
 out:
 	return ret;
 }
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 471c667a1fb4..2570d9477b37 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -223,8 +223,10 @@ struct ufs_clk_info {
 	bool enabled;
 };
 
-#define PRE_CHANGE	0
-#define POST_CHANGE	1
+enum ufs_notify_change_status {
+	PRE_CHANGE,
+	POST_CHANGE,
+};
 
 struct ufs_pa_layer_attr {
 	u32 gear_rx;
@@ -266,13 +268,17 @@ struct ufs_hba_variant_ops {
 	int	(*init)(struct ufs_hba *);
 	void	(*exit)(struct ufs_hba *);
 	u32	(*get_ufs_hci_version)(struct ufs_hba *);
-	void	(*clk_scale_notify)(struct ufs_hba *);
-	int	(*setup_clocks)(struct ufs_hba *, bool);
+	int	(*clk_scale_notify)(struct ufs_hba *, bool,
+				    enum ufs_notify_change_status);
+	int	(*setup_clocks)(struct ufs_hba *, bool);
 	int	(*setup_regulators)(struct ufs_hba *, bool);
-	int	(*hce_enable_notify)(struct ufs_hba *, bool);
-	int	(*link_startup_notify)(struct ufs_hba *, bool);
+	int	(*hce_enable_notify)(struct ufs_hba *,
+				     enum ufs_notify_change_status);
+	int	(*link_startup_notify)(struct ufs_hba *,
+				       enum ufs_notify_change_status);
 	int	(*pwr_change_notify)(struct ufs_hba *,
-					bool, struct ufs_pa_layer_attr *,
+					enum ufs_notify_change_status status,
+					struct ufs_pa_layer_attr *,
 					struct ufs_pa_layer_attr *);
 	int	(*suspend)(struct ufs_hba *, enum ufs_pm_op);
 	int	(*resume)(struct ufs_hba *, enum ufs_pm_op);
@@ -708,17 +714,18 @@ static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
 	return ufshcd_readl(hba, REG_UFS_VERSION);
 }
 
-static inline void ufshcd_vops_clk_scale_notify(struct ufs_hba *hba)
+static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
+			bool up, enum ufs_notify_change_status status)
 {
 	if (hba->vops && hba->vops->clk_scale_notify)
-		return hba->vops->clk_scale_notify(hba);
+		return hba->vops->clk_scale_notify(hba, up, status);
+	return 0;
 }
 
 static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on)
 {
 	if (hba->vops && hba->vops->setup_clocks)
 		return hba->vops->setup_clocks(hba, on);
-
 	return 0;
 }
 
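Finally, with the ufshcd.h changes above, every notification hook takes the new enum. A hedged sketch of how a variant driver would wire the updated ops table follows; the my_* names are hypothetical and only two hooks are shown:

#include "ufshcd.h"	/* provides struct ufs_hba and the vops table */

static int my_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
			       enum ufs_notify_change_status status)
{
	if (status == PRE_CHANGE)
		return 0;	/* prepare; non-zero aborts the scale */
	/* POST_CHANGE: retune timers and bus bandwidth votes */
	return 0;
}

static int my_link_startup_notify(struct ufs_hba *hba,
				  enum ufs_notify_change_status status)
{
	if (status == PRE_CHANGE)
		return 0;	/* program timers before link startup */
	return 0;		/* POST_CHANGE: post-link tuning */
}

static struct ufs_hba_variant_ops my_hba_vops = {
	.clk_scale_notify	= my_clk_scale_notify,
	.link_startup_notify	= my_link_startup_notify,
};

Unset hooks stay NULL, and the ufshcd_vops_* wrappers above fall back to returning 0, which is why every inline wrapper now ends with return 0;.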