author	Sahitya Tummala <stummala@codeaurora.org>	2014-09-25 08:32:32 -0400
committer	Christoph Hellwig <hch@lst.de>	2014-10-01 07:11:24 -0400
commit	1ab27c9cf8b63dd8dec9e17b5c17721c7f3b6cc7 (patch)
tree	50663bbaad4193381118b8ec3a1a4c451b70a2c0 /drivers/scsi/ufs
parent	7eb584db73bebbc9852a14341431ed6935419bec (diff)
ufs: Add support for clock gating
The UFS controller clocks can be gated after a certain period of inactivity, which is typically shorter than the runtime suspend timeout. In addition to gating the clocks, the link will also be put into Hibern8 mode to save more power.

Clock gating can be turned on by enabling the capability UFSHCD_CAP_CLK_GATING. To enable entering Hibern8 mode as part of clock gating, set the capability UFSHCD_CAP_HIBERN8_WITH_CLK_GATING.

The tracing events for clock gating can be enabled through debugfs as:
echo 1 > /sys/kernel/debug/tracing/events/ufs/ufshcd_clk_gating/enable
cat /sys/kernel/debug/tracing/trace_pipe

Signed-off-by: Sahitya Tummala <stummala@codeaurora.org>
Signed-off-by: Dolev Raviv <draviv@codeaurora.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
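As a rough illustration (not part of this patch), a vendor host-controller driver would opt in to the new behaviour by setting the capability bits on its ufs_hba before I/O starts flowing; only hba->caps and the two UFSHCD_CAP_* flags come from the change below, while the function name and the init hook it would be called from are hypothetical:

	/*
	 * Illustrative sketch only: a vendor glue driver could set the new
	 * capability bits from its own init path. The function name here is
	 * a placeholder, not something added by this patch.
	 */
	static void example_ufs_enable_clk_gating(struct ufs_hba *hba)
	{
		/* gate the controller clocks after the idle delay expires */
		hba->caps |= UFSHCD_CAP_CLK_GATING;
		/* additionally put the link into Hibern8 while clocks are gated */
		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
	}

The idle delay before gating defaults to 150 ms and can be tuned at runtime through the clkgate_delay_ms sysfs attribute that this patch creates.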
Diffstat (limited to 'drivers/scsi/ufs')
-rw-r--r--	drivers/scsi/ufs/ufshcd.c	323
-rw-r--r--	drivers/scsi/ufs/ufshcd.h	51
2 files changed, 349 insertions(+), 25 deletions(-)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 8bbb37d7db41..6f1ea5192db6 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -177,6 +177,11 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba);
 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
 static void ufshcd_hba_exit(struct ufs_hba *hba);
 static int ufshcd_probe_hba(struct ufs_hba *hba);
+static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+				 bool skip_ref_clk);
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
+static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
 static irqreturn_t ufshcd_intr(int irq, void *__hba);
 static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
@@ -507,6 +512,231 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
 	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
 }
 
+static void ufshcd_ungate_work(struct work_struct *work)
+{
+	int ret;
+	unsigned long flags;
+	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+			clk_gating.ungate_work);
+
+	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->clk_gating.state == CLKS_ON) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		goto unblock_reqs;
+	}
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	ufshcd_setup_clocks(hba, true);
+
+	/* Exit from hibern8 */
+	if (ufshcd_can_hibern8_during_gating(hba)) {
+		/* Prevent gating in this path */
+		hba->clk_gating.is_suspended = true;
+		if (ufshcd_is_link_hibern8(hba)) {
+			ret = ufshcd_uic_hibern8_exit(hba);
+			if (ret)
+				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+					__func__, ret);
+			else
+				ufshcd_set_link_active(hba);
+		}
+		hba->clk_gating.is_suspended = false;
+	}
+unblock_reqs:
+	scsi_unblock_requests(hba->host);
+}
+
+/**
+ * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
+ * Also, exit from hibern8 mode and set the link as active.
+ * @hba: per adapter instance
+ * @async: This indicates whether caller should ungate clocks asynchronously.
+ */
+int ufshcd_hold(struct ufs_hba *hba, bool async)
+{
+	int rc = 0;
+	unsigned long flags;
+
+	if (!ufshcd_is_clkgating_allowed(hba))
+		goto out;
+start:
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->clk_gating.active_reqs++;
+
+	switch (hba->clk_gating.state) {
+	case CLKS_ON:
+		break;
+	case REQ_CLKS_OFF:
+		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+			hba->clk_gating.state = CLKS_ON;
+			break;
+		}
+		/*
+		 * If we here, it means gating work is either done or
+		 * currently running. Hence, fall through to cancel gating
+		 * work and to enable clocks.
+		 */
+	case CLKS_OFF:
+		scsi_block_requests(hba->host);
+		hba->clk_gating.state = REQ_CLKS_ON;
+		schedule_work(&hba->clk_gating.ungate_work);
+		/*
+		 * fall through to check if we should wait for this
+		 * work to be done or not.
+		 */
+	case REQ_CLKS_ON:
+		if (async) {
+			rc = -EAGAIN;
+			hba->clk_gating.active_reqs--;
+			break;
+		}
+
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		flush_work(&hba->clk_gating.ungate_work);
+		/* Make sure state is CLKS_ON before returning */
+		goto start;
+	default:
+		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
+				__func__, hba->clk_gating.state);
+		break;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+	return rc;
+}
+
+static void ufshcd_gate_work(struct work_struct *work)
+{
+	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+			clk_gating.gate_work.work);
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->clk_gating.is_suspended) {
+		hba->clk_gating.state = CLKS_ON;
+		goto rel_lock;
+	}
+
+	if (hba->clk_gating.active_reqs
+		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+		|| hba->lrb_in_use || hba->outstanding_tasks
+		|| hba->active_uic_cmd || hba->uic_async_done)
+		goto rel_lock;
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/* put the link into hibern8 mode before turning off clocks */
+	if (ufshcd_can_hibern8_during_gating(hba)) {
+		if (ufshcd_uic_hibern8_enter(hba)) {
+			hba->clk_gating.state = CLKS_ON;
+			goto out;
+		}
+		ufshcd_set_link_hibern8(hba);
+	}
+
+	if (!ufshcd_is_link_active(hba))
+		ufshcd_setup_clocks(hba, false);
+	else
+		/* If link is active, device ref_clk can't be switched off */
+		__ufshcd_setup_clocks(hba, false, true);
+
+	/*
+	 * In case you are here to cancel this work the gating state
+	 * would be marked as REQ_CLKS_ON. In this case keep the state
+	 * as REQ_CLKS_ON which would anyway imply that clocks are off
+	 * and a request to turn them on is pending. By doing this way,
+	 * we keep the state machine in tact and this would ultimately
+	 * prevent from doing cancel work multiple times when there are
+	 * new requests arriving before the current cancel work is done.
+	 */
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->clk_gating.state == REQ_CLKS_OFF)
+		hba->clk_gating.state = CLKS_OFF;
+
+rel_lock:
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+	return;
+}
+
+/* host lock must be held before calling this variant */
+static void __ufshcd_release(struct ufs_hba *hba)
+{
+	if (!ufshcd_is_clkgating_allowed(hba))
+		return;
+
+	hba->clk_gating.active_reqs--;
+
+	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
+		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+		|| hba->lrb_in_use || hba->outstanding_tasks
+		|| hba->active_uic_cmd || hba->uic_async_done)
+		return;
+
+	hba->clk_gating.state = REQ_CLKS_OFF;
+	schedule_delayed_work(&hba->clk_gating.gate_work,
+			msecs_to_jiffies(hba->clk_gating.delay_ms));
+}
+
+void ufshcd_release(struct ufs_hba *hba)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	__ufshcd_release(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
+}
+
+static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned long flags, value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->clk_gating.delay_ms = value;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	return count;
+}
+
+static void ufshcd_init_clk_gating(struct ufs_hba *hba)
+{
+	if (!ufshcd_is_clkgating_allowed(hba))
+		return;
+
+	hba->clk_gating.delay_ms = 150;
+	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
+	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+
+	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
+	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
+	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
+	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
+	hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
+		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+}
+
+static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
+{
+	if (!ufshcd_is_clkgating_allowed(hba))
+		return;
+	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+}
+
 /**
  * ufshcd_send_command - Send SCSI or device management commands
  * @hba: per adapter instance
@@ -702,6 +932,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 	int ret;
 	unsigned long flags;
 
+	ufshcd_hold(hba, false);
 	mutex_lock(&hba->uic_cmd_mutex);
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
@@ -711,6 +942,7 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 
 	mutex_unlock(&hba->uic_cmd_mutex);
 
+	ufshcd_release(hba);
 	return ret;
 }
 
@@ -1037,6 +1269,14 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 		goto out;
 	}
 
+	err = ufshcd_hold(hba, true);
+	if (err) {
+		err = SCSI_MLQUEUE_HOST_BUSY;
+		clear_bit_unlock(tag, &hba->lrb_in_use);
+		goto out;
+	}
+	WARN_ON(hba->clk_gating.state != CLKS_ON);
+
 	lrbp = &hba->lrb[tag];
 
 	WARN_ON(lrbp->cmd);
@@ -1312,6 +1552,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 
 	BUG_ON(!hba);
 
+	ufshcd_hold(hba, false);
 	mutex_lock(&hba->dev_cmd.lock);
 	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
 			selector);
@@ -1355,6 +1596,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 
 out_unlock:
 	mutex_unlock(&hba->dev_cmd.lock);
+	ufshcd_release(hba);
 	return err;
 }
 
@@ -1378,6 +1620,7 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 
 	BUG_ON(!hba);
 
+	ufshcd_hold(hba, false);
 	if (!attr_val) {
 		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
 				__func__, opcode);
@@ -1417,6 +1660,7 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 out_unlock:
 	mutex_unlock(&hba->dev_cmd.lock);
 out:
+	ufshcd_release(hba);
 	return err;
 }
 
@@ -1444,6 +1688,7 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba,
 
 	BUG_ON(!hba);
 
+	ufshcd_hold(hba, false);
 	if (!desc_buf) {
 		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
 				__func__, opcode);
@@ -1493,6 +1738,7 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba,
 out_unlock:
 	mutex_unlock(&hba->dev_cmd.lock);
 out:
+	ufshcd_release(hba);
 	return err;
 }
 
@@ -1913,6 +2159,7 @@ out:
 	hba->uic_async_done = NULL;
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	mutex_unlock(&hba->uic_cmd_mutex);
+
 	return ret;
 }
 
@@ -1927,12 +2174,16 @@ out:
 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
 {
 	struct uic_command uic_cmd = {0};
+	int ret;
 
 	uic_cmd.command = UIC_CMD_DME_SET;
 	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
 	uic_cmd.argument3 = mode;
+	ufshcd_hold(hba, false);
+	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+	ufshcd_release(hba);
 
-	return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+	return ret;
 }
 
 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
@@ -2354,6 +2605,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
 	int err = 0;
 	int retries;
 
+	ufshcd_hold(hba, false);
 	mutex_lock(&hba->dev_cmd.lock);
 	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
 		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
@@ -2365,6 +2617,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
 		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
 	}
 	mutex_unlock(&hba->dev_cmd.lock);
+	ufshcd_release(hba);
 
 	if (err)
 		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
@@ -2764,6 +3017,7 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
 			clear_bit_unlock(index, &hba->lrb_in_use);
 			/* Do not touch lrbp after scsi done */
 			cmd->scsi_done(cmd);
+			__ufshcd_release(hba);
 		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
 			if (hba->dev_cmd.complete)
 				complete(hba->dev_cmd.complete);
@@ -3048,6 +3302,7 @@ static void ufshcd_err_handler(struct work_struct *work)
 	hba = container_of(work, struct ufs_hba, eh_work);
 
 	pm_runtime_get_sync(hba->dev);
+	ufshcd_hold(hba, false);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
@@ -3101,6 +3356,7 @@ static void ufshcd_err_handler(struct work_struct *work)
 
 out:
 	scsi_unblock_requests(hba->host);
+	ufshcd_release(hba);
 	pm_runtime_put_sync(hba->dev);
 }
 
@@ -3284,6 +3540,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
 	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
 	 */
 	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+	ufshcd_hold(hba, false);
 
 	spin_lock_irqsave(host->host_lock, flags);
 	task_req_descp = hba->utmrdl_base_addr;
@@ -3335,6 +3592,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
 	ufshcd_put_tm_slot(hba, free_slot);
 	wake_up(&hba->tm_tag_wq);
 
+	ufshcd_release(hba);
 	return err;
 }
 
@@ -3417,6 +3675,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 	hba = shost_priv(host);
 	tag = cmd->request->tag;
 
+	ufshcd_hold(hba, false);
 	/* If command is already aborted/completed, return SUCCESS */
 	if (!(test_bit(tag, &hba->outstanding_reqs)))
 		goto out;
@@ -3481,6 +3740,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 
 	clear_bit_unlock(tag, &hba->lrb_in_use);
 	wake_up(&hba->dev_cmd.tag_wq);
+
 out:
 	if (!err) {
 		err = SUCCESS;
@@ -3489,6 +3749,11 @@ out:
 		err = FAILED;
 	}
 
+	/*
+	 * This ufshcd_release() corresponds to the original scsi cmd that got
+	 * aborted here (as we won't get any IRQ for it).
+	 */
+	ufshcd_release(hba);
 	return err;
 }
 
@@ -3573,6 +3838,7 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
 
 	hba = shost_priv(cmd->device->host);
 
+	ufshcd_hold(hba, false);
 	/*
 	 * Check if there is any race with fatal error handling.
 	 * If so, wait for it to complete. Even though fatal error
@@ -3606,6 +3872,7 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
 	ufshcd_clear_eh_in_progress(hba);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+	ufshcd_release(hba);
 	return err;
 }
 
@@ -3925,6 +4192,7 @@ static struct scsi_host_template ufshcd_driver_template = {
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
 	.can_queue		= UFSHCD_CAN_QUEUE,
+	.max_host_blocked	= 1,
 };
 
 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
@@ -4127,6 +4395,7 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 	int ret = 0;
 	struct ufs_clk_info *clki;
 	struct list_head *head = &hba->clk_list_head;
+	unsigned long flags;
 
 	if (!head || list_empty(head))
 		goto out;
@@ -4151,12 +4420,19 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 					clki->name, on ? "en" : "dis");
 		}
 	}
+
+	if (hba->vops && hba->vops->setup_clocks)
+		ret = hba->vops->setup_clocks(hba, on);
 out:
 	if (ret) {
 		list_for_each_entry(clki, head, list) {
 			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
 				clk_disable_unprepare(clki->clk);
 		}
+	} else if (!ret && on) {
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		hba->clk_gating.state = CLKS_ON;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
 	}
 	return ret;
 }
@@ -4217,23 +4493,14 @@ static int ufshcd_variant_hba_init(struct ufs_hba *hba)
 			goto out;
 	}
 
-	if (hba->vops->setup_clocks) {
-		err = hba->vops->setup_clocks(hba, true);
-		if (err)
-			goto out_exit;
-	}
-
 	if (hba->vops->setup_regulators) {
 		err = hba->vops->setup_regulators(hba, true);
 		if (err)
-			goto out_clks;
+			goto out_exit;
 	}
 
 	goto out;
 
-out_clks:
-	if (hba->vops->setup_clocks)
-		hba->vops->setup_clocks(hba, false);
 out_exit:
 	if (hba->vops->exit)
 		hba->vops->exit(hba);
@@ -4555,6 +4822,9 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	 * If we can't transition into any of the low power modes
 	 * just gate the clocks.
 	 */
+	ufshcd_hold(hba, false);
+	hba->clk_gating.is_suspended = true;
+
 	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
 			req_link_state == UIC_LINK_ACTIVE_STATE) {
 		goto disable_clks;
@@ -4577,7 +4847,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		 */
 		ret = ufshcd_bkops_ctrl(hba, BKOPS_STATUS_NON_CRITICAL);
 		if (ret)
-			goto out;
+			goto enable_gating;
 	}
 
 	if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
@@ -4587,7 +4857,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 		ufshcd_disable_auto_bkops(hba);
 		ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
 		if (ret)
-			goto out;
+			goto enable_gating;
 	}
 
 	ret = ufshcd_link_state_transition(hba, req_link_state, 1);
@@ -4620,6 +4890,7 @@ disable_clks:
 		/* If link is active, device ref_clk can't be switched off */
 		__ufshcd_setup_clocks(hba, false, true);
 
+	hba->clk_gating.state = CLKS_OFF;
 	/*
 	 * Disable the host irq as host controller as there won't be any
 	 * host controller trasanction expected till resume.
@@ -4641,6 +4912,9 @@ set_link_active:
 set_dev_active:
 	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
 		ufshcd_disable_auto_bkops(hba);
+enable_gating:
+	hba->clk_gating.is_suspended = false;
+	ufshcd_release(hba);
 out:
 	hba->pm_op_in_progress = 0;
 	return ret;
@@ -4670,12 +4944,6 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	if (ret)
 		goto out;
 
-	if (hba->vops && hba->vops->setup_clocks) {
-		ret = hba->vops->setup_clocks(hba, true);
-		if (ret)
-			goto disable_clks;
-	}
-
 	/* enable the host irq as host controller would be active soon */
 	ret = ufshcd_enable_irq(hba);
 	if (ret)
@@ -4719,6 +4987,10 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	}
 
 	ufshcd_disable_auto_bkops(hba);
+	hba->clk_gating.is_suspended = false;
+
+	/* Schedule clock gating in case of no access to UFS device yet */
+	ufshcd_release(hba);
 	goto out;
 
 set_old_link_state:
@@ -4730,9 +5002,6 @@ disable_vreg:
 	ufshcd_vreg_set_lpm(hba);
 disable_irq_and_vops_clks:
 	ufshcd_disable_irq(hba);
-	if (hba->vops && hba->vops->setup_clocks)
-		ret = hba->vops->setup_clocks(hba, false);
-disable_clks:
 	ufshcd_setup_clocks(hba, false);
 out:
 	hba->pm_op_in_progress = 0;
@@ -4902,6 +5171,7 @@ void ufshcd_remove(struct ufs_hba *hba)
 
 	scsi_host_put(hba->host);
 
+	ufshcd_exit_clk_gating(hba);
 	ufshcd_hba_exit(hba);
 }
 EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -5037,11 +5307,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	/* Initialize device management tag acquire wait queue */
 	init_waitqueue_head(&hba->dev_cmd.tag_wq);
 
+	ufshcd_init_clk_gating(hba);
 	/* IRQ registration */
 	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
 	if (err) {
 		dev_err(hba->dev, "request irq failed\n");
-		goto out_disable;
+		goto exit_gating;
 	} else {
 		hba->is_irq_enabled = true;
 	}
@@ -5050,13 +5321,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 	err = scsi_init_shared_tag_map(host, host->can_queue);
 	if (err) {
 		dev_err(hba->dev, "init shared queue failed\n");
-		goto out_disable;
+		goto exit_gating;
 	}
 
 	err = scsi_add_host(host, hba->dev);
 	if (err) {
 		dev_err(hba->dev, "scsi_add_host failed\n");
-		goto out_disable;
+		goto exit_gating;
 	}
 
 	/* Host controller enable */
@@ -5081,6 +5352,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 
 out_remove_scsi_host:
 	scsi_remove_host(hba->host);
+exit_gating:
+	ufshcd_exit_clk_gating(hba);
 out_disable:
 	hba->is_irq_enabled = false;
 	scsi_host_put(host);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 343b18a7a8b0..29d34d3aa5ee 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -269,6 +269,38 @@ struct ufs_hba_variant_ops {
 	int	(*resume)(struct ufs_hba *, enum ufs_pm_op);
 };
 
+/* clock gating state */
+enum clk_gating_state {
+	CLKS_OFF,
+	CLKS_ON,
+	REQ_CLKS_OFF,
+	REQ_CLKS_ON,
+};
+
+/**
+ * struct ufs_clk_gating - UFS clock gating related info
+ * @gate_work: worker to turn off clocks after some delay as specified in
+ * delay_ms
+ * @ungate_work: worker to turn on clocks that will be used in case of
+ * interrupt context
+ * @state: the current clocks state
+ * @delay_ms: gating delay in ms
+ * @is_suspended: clk gating is suspended when set to 1 which can be used
+ * during suspend/resume
+ * @delay_attr: sysfs attribute to control delay_attr
+ * @active_reqs: number of requests that are pending and should be waited for
+ * completion before gating clocks.
+ */
+struct ufs_clk_gating {
+	struct delayed_work gate_work;
+	struct work_struct ungate_work;
+	enum clk_gating_state state;
+	unsigned long delay_ms;
+	bool is_suspended;
+	struct device_attribute delay_attr;
+	int active_reqs;
+};
+
 /**
  * struct ufs_init_prefetch - contains data that is pre-fetched once during
  * initialization
@@ -414,8 +446,25 @@ struct ufs_hba {
 
 	struct ufs_pa_layer_attr pwr_info;
 	struct ufs_pwr_mode_info max_pwr_info;
+
+	struct ufs_clk_gating clk_gating;
+	/* Control to enable/disable host capabilities */
+	u32 caps;
+	/* Allow dynamic clk gating */
+#define UFSHCD_CAP_CLK_GATING	(1 << 0)
+	/* Allow hiberb8 with clk gating */
+#define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1)
 };
 
+/* Returns true if clocks can be gated. Otherwise false */
+static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
+{
+	return hba->caps & UFSHCD_CAP_CLK_GATING;
+}
+static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
+{
+	return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+}
 #define ufshcd_writel(hba, val, reg) \
 	writel((val), (hba)->mmio_base + (reg))
 #define ufshcd_readl(hba, reg) \
@@ -497,4 +546,6 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
 	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
 }
 
+int ufshcd_hold(struct ufs_hba *hba, bool async);
+void ufshcd_release(struct ufs_hba *hba);
 #endif /* End of Header */