 Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt |   3 +
 drivers/scsi/device_handler/scsi_dh_alua.c              |   2 +-
 drivers/scsi/megaraid/megaraid_sas.h                    |   2 +-
 drivers/scsi/megaraid/megaraid_sas_base.c               |  14 +-
 drivers/scsi/scsi_sas_internal.h                        |   2 +-
 drivers/scsi/scsi_sysfs.c                               |   2 +-
 drivers/scsi/scsi_transport_sas.c                       |   2 +
 drivers/scsi/ufs/ufs-qcom.c                             | 155 +++-
 drivers/scsi/ufs/ufs-qcom.h                             |   9 +
 drivers/scsi/ufs/ufs.h                                  |  33 +
 drivers/scsi/ufs/ufs_quirks.h                           | 151 ++++
 drivers/scsi/ufs/ufshcd-pltfrm.c                        |  19 +
 drivers/scsi/ufs/ufshcd.c                               | 818 ++++++++++++----
 drivers/scsi/ufs/ufshcd.h                               |  40 +-
 drivers/scsi/ufs/ufshci.h                               |   4 +-
 drivers/scsi/ufs/unipro.h                               |  22 +
 16 files changed, 1143 insertions(+), 135 deletions(-)
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index 03c0e989e020..66f6adf8d44d 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -38,6 +38,9 @@ Optional properties:
 			  defined or a value in the array is "0" then it is assumed
 			  that the frequency is set by the parent clock or a
 			  fixed rate clock source.
+- lanes-per-direction	: number of lanes available per direction - either 1 or 2.
+			  Note that it is assumed that the same number of lanes is used in
+			  both directions at once. If not specified, the default is 2 lanes per direction.
 
 Note: If above properties are not defined it can be assumed that the supply
 regulators or clocks are always on.
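
An illustrative node for the new property (hypothetical: node name, compatible
string, and register values are placeholders, not taken from this patch):

	ufshc@fc598000 {
		compatible = "qcom,ufshc";
		reg = <0xfc598000 0x800>;
		lanes-per-direction = <1>;
	};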
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 5bcdf8dd6fb0..a404a41e871c 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -332,7 +332,7 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
 {
 	int rel_port = -1, group_id;
 	struct alua_port_group *pg, *old_pg = NULL;
-	bool pg_updated;
+	bool pg_updated = false;
 	unsigned long flags;
 
 	group_id = scsi_vpd_tpg_id(sdev, &rel_port);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 4484e63033a5..fce414a2cd76 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -2097,7 +2097,7 @@ struct megasas_instance {
 	u8 UnevenSpanSupport;
 
 	u8 supportmax256vd;
-	u8 allow_fw_scan;
+	u8 pd_list_not_supported;
 	u16 fw_supported_vd_count;
 	u16 fw_supported_pd_count;
 
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 5c08568ccfbf..69d375b8f2e1 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1838,7 +1838,7 @@ static int megasas_slave_configure(struct scsi_device *sdev)
 	struct megasas_instance *instance;
 
 	instance = megasas_lookup_instance(sdev->host->host_no);
-	if (instance->allow_fw_scan) {
+	if (instance->pd_list_not_supported) {
 		if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
 			sdev->type == TYPE_DISK) {
 			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
@@ -1874,7 +1874,8 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
 		pd_index =
 			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
 			sdev->id;
-		if ((instance->allow_fw_scan || instance->pd_list[pd_index].driveState ==
+		if ((instance->pd_list_not_supported ||
+			instance->pd_list[pd_index].driveState ==
 			MR_PD_STATE_SYSTEM)) {
 			goto scan_target;
 		}
@@ -4087,7 +4088,13 @@ megasas_get_pd_list(struct megasas_instance *instance)
 
 	switch (ret) {
 	case DCMD_FAILED:
-		megaraid_sas_kill_hba(instance);
+		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
+			"failed/not supported by firmware\n");
+
+		if (instance->ctrl_context)
+			megaraid_sas_kill_hba(instance);
+		else
+			instance->pd_list_not_supported = 1;
 		break;
 	case DCMD_TIMEOUT:
 
@@ -5034,7 +5041,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
 	case PCI_DEVICE_ID_DELL_PERC5:
 	default:
 		instance->instancet = &megasas_instance_template_xscale;
-		instance->allow_fw_scan = 1;
 		break;
 	}
 
diff --git a/drivers/scsi/scsi_sas_internal.h b/drivers/scsi/scsi_sas_internal.h
index 6266a5d73d0f..e659912498bd 100644
--- a/drivers/scsi/scsi_sas_internal.h
+++ b/drivers/scsi/scsi_sas_internal.h
@@ -4,7 +4,7 @@
 #define SAS_HOST_ATTRS		0
 #define SAS_PHY_ATTRS		17
 #define SAS_PORT_ATTRS		1
-#define SAS_RPORT_ATTRS		7
+#define SAS_RPORT_ATTRS		8
 #define SAS_END_DEV_ATTRS	5
 #define SAS_EXPANDER_ATTRS	7
 
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index d16441961f3a..92ffd2406f97 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1105,7 +1105,7 @@ static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
 	if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80)
 		return 0;
 
-	if (attr == &dev_attr_vpd_pg83 && sdev->vpd_pg83)
+	if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83)
 		return 0;
 
 	return S_IRUGO;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 80520e2f0fa2..b6f958193dad 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1286,6 +1286,7 @@ sas_rphy_protocol_attr(identify.target_port_protocols, target_port_protocols);
 sas_rphy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
 		unsigned long long);
 sas_rphy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
+sas_rphy_simple_attr(scsi_target_id, scsi_target_id, "%d\n", u32);
 
 /* only need 8 bytes of data plus header (4 or 8) */
 #define BUF_SIZE 64
@@ -1886,6 +1887,7 @@ sas_attach_transport(struct sas_function_template *ft)
 	SETUP_RPORT_ATTRIBUTE(rphy_device_type);
 	SETUP_RPORT_ATTRIBUTE(rphy_sas_address);
 	SETUP_RPORT_ATTRIBUTE(rphy_phy_identifier);
+	SETUP_RPORT_ATTRIBUTE(rphy_scsi_target_id);
 	SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_enclosure_identifier,
 				       get_enclosure_identifier);
 	SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_bay_identifier,
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 4f38d008bfb4..3aedf73f1131 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,8 +16,8 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/phy/phy.h>
-
 #include <linux/phy/phy-qcom-ufs.h>
+
 #include "ufshcd.h"
 #include "ufshcd-pltfrm.h"
 #include "unipro.h"
@@ -58,6 +58,12 @@ static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
 			len * 4, false);
 }
 
+static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
+		char *prefix, void *priv)
+{
+	ufs_qcom_dump_regs(hba, offset, len, prefix);
+}
+
 static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
 {
 	int err = 0;
@@ -106,9 +112,11 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
 	if (!host->is_lane_clks_enabled)
 		return;
 
-	clk_disable_unprepare(host->tx_l1_sync_clk);
+	if (host->hba->lanes_per_direction > 1)
+		clk_disable_unprepare(host->tx_l1_sync_clk);
 	clk_disable_unprepare(host->tx_l0_sync_clk);
-	clk_disable_unprepare(host->rx_l1_sync_clk);
+	if (host->hba->lanes_per_direction > 1)
+		clk_disable_unprepare(host->rx_l1_sync_clk);
 	clk_disable_unprepare(host->rx_l0_sync_clk);
 
 	host->is_lane_clks_enabled = false;
@@ -132,21 +140,24 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
 	if (err)
 		goto disable_rx_l0;
 
-	err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
-		host->rx_l1_sync_clk);
-	if (err)
-		goto disable_tx_l0;
+	if (host->hba->lanes_per_direction > 1) {
+		err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
+			host->rx_l1_sync_clk);
+		if (err)
+			goto disable_tx_l0;
 
-	err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
-		host->tx_l1_sync_clk);
-	if (err)
-		goto disable_rx_l1;
+		err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
+			host->tx_l1_sync_clk);
+		if (err)
+			goto disable_rx_l1;
+	}
 
 	host->is_lane_clks_enabled = true;
 	goto out;
 
 disable_rx_l1:
-	clk_disable_unprepare(host->rx_l1_sync_clk);
+	if (host->hba->lanes_per_direction > 1)
+		clk_disable_unprepare(host->rx_l1_sync_clk);
 disable_tx_l0:
 	clk_disable_unprepare(host->tx_l0_sync_clk);
 disable_rx_l0:
@@ -170,14 +181,16 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
 	if (err)
 		goto out;
 
-	err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
-		&host->rx_l1_sync_clk);
-	if (err)
-		goto out;
-
-	err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
-		&host->tx_l1_sync_clk);
+	/* In case of single lane per direction, don't read lane1 clocks */
+	if (host->hba->lanes_per_direction > 1) {
+		err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
+			&host->rx_l1_sync_clk);
+		if (err)
+			goto out;
 
+		err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
+			&host->tx_l1_sync_clk);
+	}
 out:
 	return err;
 }
@@ -267,9 +280,8 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
 	ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
 
 	if (ret) {
-		dev_err(hba->dev,
-			"%s: ufs_qcom_phy_calibrate_phy()failed, ret = %d\n",
-			__func__, ret);
+		dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
+			__func__, ret);
 		goto out;
 	}
 
@@ -519,6 +531,18 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
 		err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
 								  150);
 
+		/*
+		 * Some UFS devices (and maybe the host) have issues if LCC is
+		 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
+		 * before link startup, which will make sure that both host
+		 * and device TX LCC are disabled once link startup is
+		 * completed.
+		 */
+		if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
+			err = ufshcd_dme_set(hba,
+					UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
+					0);
+
 		break;
 	case POST_CHANGE:
 		ufs_qcom_link_startup_post_change(hba);
@@ -962,6 +986,10 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 			goto out;
 		}
 
+		/* enable the device ref clock before changing to HS mode */
+		if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
+			ufshcd_is_hs_mode(dev_req_params))
+			ufs_qcom_dev_ref_clk_ctrl(host, true);
 		break;
 	case POST_CHANGE:
 		if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
@@ -989,6 +1017,11 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 		memcpy(&host->dev_req_params,
 				dev_req_params, sizeof(*dev_req_params));
 		ufs_qcom_update_bus_bw_vote(host);
+
+		/* disable the device ref clock if entered PWM mode */
+		if (ufshcd_is_hs_mode(&hba->pwr_info) &&
+			!ufshcd_is_hs_mode(dev_req_params))
+			ufs_qcom_dev_ref_clk_ctrl(host, false);
 		break;
 	default:
 		ret = -EINVAL;
@@ -1090,6 +1123,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
 			ufs_qcom_phy_disable_iface_clk(host->generic_phy);
 			goto out;
 		}
+		/* enable the device ref clock for HS mode */
+		if (ufshcd_is_hs_mode(&hba->pwr_info))
+			ufs_qcom_dev_ref_clk_ctrl(host, true);
 		vote = host->bus_vote.saved_vote;
 		if (vote == host->bus_vote.min_bw_vote)
 			ufs_qcom_update_bus_bw_vote(host);
@@ -1367,6 +1403,74 @@ out:
 	return err;
 }
 
+static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
+		void *priv, void (*print_fn)(struct ufs_hba *hba,
+		int offset, int num_regs, char *str, void *priv))
+{
+	u32 reg;
+	struct ufs_qcom_host *host;
+
+	if (unlikely(!hba)) {
+		pr_err("%s: hba is NULL\n", __func__);
+		return;
+	}
+	if (unlikely(!print_fn)) {
+		dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
+		return;
+	}
+
+	host = ufshcd_get_variant(hba);
+	if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
+		return;
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
+	print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
+
+	reg = ufshcd_readl(hba, REG_UFS_CFG1);
+	reg |= UFS_BIT(17);
+	ufshcd_writel(hba, reg, REG_UFS_CFG1);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
+	print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
+	print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
+	print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
+
+	ufshcd_writel(hba, (reg & ~UFS_BIT(17)), REG_UFS_CFG1);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
+	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
+	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
+	print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
+	print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
+	print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
+	print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
+
+	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
+	print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
+}
+
+static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
+{
+	if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
+		ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
+	else
+		ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
+}
+
 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
 {
 	/* provide a legal default configuration */
@@ -1475,6 +1579,7 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
 	ufshcd_rmwl(host->hba, mask,
 		    (u32)host->testbus.select_minor << offset,
 		    reg);
+	ufs_qcom_enable_test_bus(host);
 	ufshcd_release(host->hba);
 	pm_runtime_put_sync(host->hba->dev);
 
@@ -1491,8 +1596,10 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
 	ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
 			"HCI Vendor Specific Registers ");
 
+	ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
 	ufs_qcom_testbus_read(hba);
 }
+
 /**
  * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
  *
@@ -1537,7 +1644,7 @@ static int ufs_qcom_probe(struct platform_device *pdev)
  * ufs_qcom_remove - set driver_data of the device to NULL
  * @pdev: pointer to platform device handle
  *
- * Always return 0
+ * Always returns 0
  */
 static int ufs_qcom_remove(struct platform_device *pdev)
 {
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 36249b35f858..a19307a57ce2 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -241,6 +241,15 @@ struct ufs_qcom_host {
 	struct ufs_qcom_testbus testbus;
 };
 
+static inline u32
+ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
+{
+	if (host->hw_ver.major <= 0x02)
+		return UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(reg);
+
+	return UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(reg);
+}
+
 #define ufs_qcom_is_link_off(hba)	ufshcd_is_link_off(hba)
 #define ufs_qcom_is_link_active(hba)	ufshcd_is_link_active(hba)
 #define ufs_qcom_is_link_hibern8(hba)	ufshcd_is_link_hibern8(hba)
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 54a16cef0367..b291fa6ed2ad 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -43,6 +43,7 @@
 #define GENERAL_UPIU_REQUEST_SIZE 32
 #define QUERY_DESC_MAX_SIZE       255
 #define QUERY_DESC_MIN_SIZE       2
+#define QUERY_DESC_HDR_SIZE       2
 #define QUERY_OSF_SIZE            (GENERAL_UPIU_REQUEST_SIZE - \
 					(sizeof(struct utp_upiu_header)))
 
@@ -195,6 +196,37 @@ enum unit_desc_param {
 	UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1	= 0x22,
 };
 
+/* Device descriptor parameters offsets in bytes */
+enum device_desc_param {
+	DEVICE_DESC_PARAM_LEN			= 0x0,
+	DEVICE_DESC_PARAM_TYPE			= 0x1,
+	DEVICE_DESC_PARAM_DEVICE_TYPE		= 0x2,
+	DEVICE_DESC_PARAM_DEVICE_CLASS		= 0x3,
+	DEVICE_DESC_PARAM_DEVICE_SUB_CLASS	= 0x4,
+	DEVICE_DESC_PARAM_PRTCL			= 0x5,
+	DEVICE_DESC_PARAM_NUM_LU		= 0x6,
+	DEVICE_DESC_PARAM_NUM_WLU		= 0x7,
+	DEVICE_DESC_PARAM_BOOT_ENBL		= 0x8,
+	DEVICE_DESC_PARAM_DESC_ACCSS_ENBL	= 0x9,
+	DEVICE_DESC_PARAM_INIT_PWR_MODE		= 0xA,
+	DEVICE_DESC_PARAM_HIGH_PR_LUN		= 0xB,
+	DEVICE_DESC_PARAM_SEC_RMV_TYPE		= 0xC,
+	DEVICE_DESC_PARAM_SEC_LU		= 0xD,
+	DEVICE_DESC_PARAM_BKOP_TERM_LT		= 0xE,
+	DEVICE_DESC_PARAM_ACTVE_ICC_LVL		= 0xF,
+	DEVICE_DESC_PARAM_SPEC_VER		= 0x10,
+	DEVICE_DESC_PARAM_MANF_DATE		= 0x12,
+	DEVICE_DESC_PARAM_MANF_NAME		= 0x14,
+	DEVICE_DESC_PARAM_PRDCT_NAME		= 0x15,
+	DEVICE_DESC_PARAM_SN			= 0x16,
+	DEVICE_DESC_PARAM_OEM_ID		= 0x17,
+	DEVICE_DESC_PARAM_MANF_ID		= 0x18,
+	DEVICE_DESC_PARAM_UD_OFFSET		= 0x1A,
+	DEVICE_DESC_PARAM_UD_LEN		= 0x1B,
+	DEVICE_DESC_PARAM_RTT_CAP		= 0x1C,
+	DEVICE_DESC_PARAM_FRQ_RTC		= 0x1D,
+};
+
 /*
  * Logical Unit Write Protect
  * 00h: LU not write protected
@@ -469,6 +501,7 @@ struct ufs_vreg {
 	struct regulator *reg;
 	const char *name;
 	bool enabled;
+	bool unused;
 	int min_uV;
 	int max_uV;
 	int min_uA;
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
new file mode 100644
index 000000000000..ee4ab85e2801
--- /dev/null
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UFS_QUIRKS_H_
+#define _UFS_QUIRKS_H_
+
+/* return true if s1 is a prefix of s2 */
+#define STR_PRFX_EQUAL(s1, s2) !strncmp(s1, s2, strlen(s1))
+
+#define UFS_ANY_VENDOR 0xFFFF
+#define UFS_ANY_MODEL  "ANY_MODEL"
+
+#define MAX_MODEL_LEN 16
+
+#define UFS_VENDOR_TOSHIBA     0x198
+#define UFS_VENDOR_SAMSUNG     0x1CE
+
+/**
+ * ufs_device_info - ufs device details
+ * @wmanufacturerid: card details
+ * @model: card model
+ */
+struct ufs_device_info {
+	u16 wmanufacturerid;
+	char model[MAX_MODEL_LEN + 1];
+};
+
+/**
+ * ufs_dev_fix - ufs device quirk info
+ * @card: ufs card details
+ * @quirk: device quirk
+ */
+struct ufs_dev_fix {
+	struct ufs_device_info card;
+	unsigned int quirk;
+};
+
+#define END_FIX { { 0 }, 0 }
+
+/* add specific device quirk */
+#define UFS_FIX(_vendor, _model, _quirk) \
+	{					  \
+		.card.wmanufacturerid = (_vendor),\
+		.card.model = (_model),		  \
+		.quirk = (_quirk),		  \
+	}
+
+/*
+ * If a UFS device has issues processing LCC (Line Control
+ * Command) coming from the UFS host controller, then enable this quirk.
+ * When this quirk is enabled, the host controller driver should disable
+ * LCC transmission on the UFS host controller (by clearing the
+ * TX_LCC_ENABLE attribute of the host to 0).
+ */
+#define UFS_DEVICE_QUIRK_BROKEN_LCC (1 << 0)
+
+/*
+ * Some UFS devices don't need VCCQ rail for device operations. Enabling this
+ * quirk for such devices will make sure that VCCQ rail is not voted.
+ */
+#define UFS_DEVICE_NO_VCCQ (1 << 1)
+
+/*
+ * Some vendors' UFS devices send back-to-back NACs for the DL data frames,
+ * causing the host controller to raise the DFES error status. Sometimes
+ * such UFS devices send back-to-back NACs without waiting for a new
+ * retransmitted DL frame from the host, and in such cases the host UniPro
+ * might go into a bad state without raising the DFES error
+ * interrupt. If this happens, then all the pending commands would time out
+ * only after the respective SW command timeout (which is generally too large).
+ *
+ * We can work around such device behaviour like this:
+ * - As soon as SW sees the DL NAC error, it should schedule the error handler
+ * - The error handler would sleep for 50ms to see if there are any fatal errors
+ *   raised by the UFS controller.
+ * - If there are fatal errors then SW does normal error recovery.
+ * - If there are no fatal errors then SW sends the NOP command to the device
+ *   to check if the link is alive.
+ * - If the NOP command times out, SW does normal error recovery.
+ * - If the NOP command succeeds, skip the error handling.
+ *
+ * If the DL NAC error is seen multiple times with some vendor's UFS devices,
+ * enable this quirk to initiate quick error recovery and also silence related
+ * error logs to reduce spamming of kernel logs.
+ */
+#define UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS (1 << 2)
+
+/*
+ * Some UFS devices may not work properly after resume if the link was kept
+ * in off state during suspend. Enabling this quirk will not allow the
+ * link to be kept in off state during suspend.
+ */
+#define UFS_DEVICE_QUIRK_NO_LINK_OFF (1 << 3)
+
+/*
+ * A few Toshiba UFS device models advertise RX_MIN_ACTIVATETIME_CAPABILITY as
+ * 600us, which may not be enough for a reliable hibern8 exit hardware sequence
+ * from the UFS device.
+ * To work around this issue, the host should set its PA_TACTIVATE time to 1ms
+ * even if the device advertises RX_MIN_ACTIVATETIME_CAPABILITY less than 1ms.
+ */
+#define UFS_DEVICE_QUIRK_PA_TACTIVATE (1 << 4)
+
+/*
+ * Some UFS memory devices may have really low read/write throughput in
+ * FAST AUTO mode; enable this quirk to make sure that FAST AUTO mode is
+ * never enabled for such devices.
+ */
+#define UFS_DEVICE_NO_FASTAUTO (1 << 5)
+
+/*
+ * It seems some UFS devices may keep drawing more than sleep current
+ * (at least for 500us) from UFS rails (especially from VCCQ rail).
+ * To avoid this situation, add a 2ms delay before putting these UFS
+ * rails in LPM mode.
+ */
+#define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM (1 << 6)
+
+struct ufs_hba;
+void ufs_advertise_fixup_device(struct ufs_hba *hba);
+
+static struct ufs_dev_fix ufs_fixups[] = {
+	/* UFS cards deviations table */
+	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
+	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+		UFS_DEVICE_NO_FASTAUTO),
+	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
+		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
+		UFS_DEVICE_QUIRK_PA_TACTIVATE),
+	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
+		UFS_DEVICE_QUIRK_PA_TACTIVATE),
+
+	END_FIX
+};
+#endif /* _UFS_QUIRKS_H_ */
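
ufs_advertise_fixup_device() is only declared above; its definition lands in
ufshcd.c later in this series. A minimal sketch of how a table like
ufs_fixups[] is typically consumed, assuming 'card' was already filled in from
the device and string descriptors (the walker function name is hypothetical;
hba->dev_quirks is the field checked by the ufshcd.c hunks below):

	/*
	 * Sketch: OR every matching quirk into hba->dev_quirks. END_FIX
	 * terminates the table with quirk == 0, so the loop stops there.
	 */
	static void ufs_apply_dev_quirks_sketch(struct ufs_hba *hba,
						struct ufs_device_info *card)
	{
		struct ufs_dev_fix *f;

		for (f = ufs_fixups; f->quirk; f++) {
			if ((f->card.wmanufacturerid == card->wmanufacturerid ||
			     f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
			    (STR_PRFX_EQUAL(f->card.model, card->model) ||
			     !strcmp(f->card.model, UFS_ANY_MODEL)))
				hba->dev_quirks |= f->quirk;
		}
	}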
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index d2a7b127b05c..718f12e09885 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -40,6 +40,8 @@
 #include "ufshcd.h"
 #include "ufshcd-pltfrm.h"
 
+#define UFSHCD_DEFAULT_LANES_PER_DIRECTION	2
+
 static int ufshcd_parse_clock_info(struct ufs_hba *hba)
 {
 	int ret = 0;
@@ -277,6 +279,21 @@ void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
 }
 EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown);
 
+static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
+{
+	struct device *dev = hba->dev;
+	int ret;
+
+	ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
+				   &hba->lanes_per_direction);
+	if (ret) {
+		dev_dbg(hba->dev,
+			"%s: failed to read lanes-per-direction, ret=%d\n",
+			__func__, ret);
+		hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
+	}
+}
+
 /**
  * ufshcd_pltfrm_init - probe routine of the driver
  * @pdev: pointer to Platform device handle
@@ -331,6 +348,8 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
 
+	ufshcd_init_lanes_per_dir(hba);
+
 	err = ufshcd_init(hba, mmio_base, irq);
 	if (err) {
 		dev_err(dev, "Initialization failed\n");
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9c1b94bef8f3..f8fa72c31a9d 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -39,8 +39,10 @@
 
 #include <linux/async.h>
 #include <linux/devfreq.h>
-
+#include <linux/nls.h>
+#include <linux/of.h>
 #include "ufshcd.h"
+#include "ufs_quirks.h"
 #include "unipro.h"
 
 #define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
@@ -131,9 +133,11 @@ enum {
 /* UFSHCD UIC layer error flags */
 enum {
 	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
-	UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
-	UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
-	UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
+	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
+	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
+	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
+	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
+	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
 };
 
 /* Interrupt configuration options */
@@ -193,6 +197,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba);
 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 				 bool skip_ref_clk);
 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
+static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
@@ -231,6 +236,16 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
 	}
 }
 
+/* replace non-printable or non-ASCII characters with spaces */
+static inline void ufshcd_remove_non_printable(char *val)
+{
+	if (!val)
+		return;
+
+	if (*val < 0x20 || *val > 0x7e)
+		*val = ' ';
+}
+
 /*
  * ufshcd_wait_for_register - wait for register value to change
  * @hba - per-adapter interface
@@ -239,11 +254,13 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
  * @val - wait condition
  * @interval_us - polling interval in microsecs
  * @timeout_ms - timeout in millisecs
+ * @can_sleep - perform sleep or just spin
  *
  * Returns -ETIMEDOUT on error, zero on success
  */
-static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
-		u32 val, unsigned long interval_us, unsigned long timeout_ms)
+int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
+				u32 val, unsigned long interval_us,
+				unsigned long timeout_ms, bool can_sleep)
 {
 	int err = 0;
 	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
@@ -252,9 +269,10 @@ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
252 val = val & mask; 269 val = val & mask;
253 270
254 while ((ufshcd_readl(hba, reg) & mask) != val) { 271 while ((ufshcd_readl(hba, reg) & mask) != val) {
255 /* wakeup within 50us of expiry */ 272 if (can_sleep)
256 usleep_range(interval_us, interval_us + 50); 273 usleep_range(interval_us, interval_us + 50);
257 274 else
275 udelay(interval_us);
258 if (time_after(jiffies, timeout)) { 276 if (time_after(jiffies, timeout)) {
259 if ((ufshcd_readl(hba, reg) & mask) != val) 277 if ((ufshcd_readl(hba, reg) & mask) != val)
260 err = -ETIMEDOUT; 278 err = -ETIMEDOUT;
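
With the new can_sleep flag the same polling helper now also works from
contexts that must not sleep. A hypothetical caller (register, mask, and
timeout values are illustrative, not taken from this patch):

	/* Busy-wait for a doorbell bit to clear while atomic. */
	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
				       mask, ~mask,
				       1000,   /* poll every 1000 us */
				       1000,   /* give up after 1000 ms */
				       false); /* spin, don't sleep */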
@@ -552,6 +570,34 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
 	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
 }
 
+u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
+{
+	/* HCI versions 1.0 and 1.1 support UniPro 1.41 */
+	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+	    (hba->ufs_version == UFSHCI_VERSION_11))
+		return UFS_UNIPRO_VER_1_41;
+	else
+		return UFS_UNIPRO_VER_1_6;
+}
+EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
+
+static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
+{
+	/*
+	 * If both host and device support UniPro ver1.6 or later, PA layer
+	 * parameters tuning happens during link startup itself.
+	 *
+	 * We can manually tune PA layer parameters if either host or device
+	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
+	 * logic simple, we will only do manual tuning if the local unipro
+	 * version doesn't support ver1.6 or later.
+	 */
+	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
+		return true;
+	else
+		return false;
+}
+
 static void ufshcd_ungate_work(struct work_struct *work)
 {
 	int ret;
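
A sketch of how the new helper pair is meant to be consulted; the tuning
callee here is hypothetical (the real caller is added elsewhere in this
series and is not part of this hunk):

	/* Only tune PA layer parameters manually on pre-1.6 local UniPro. */
	if (ufshcd_is_unipro_pa_params_tuning_req(hba))
		ufshcd_tune_pa_params(hba);	/* hypothetical helper */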
@@ -1458,7 +1504,7 @@ ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
 	 */
 	err = ufshcd_wait_for_register(hba,
 			REG_UTP_TRANSFER_REQ_DOOR_BELL,
-			mask, ~mask, 1000, 1000);
+			mask, ~mask, 1000, 1000, true);
 
 	return err;
 }
@@ -1857,21 +1903,7 @@ static int ufshcd_query_attr_retry(struct ufs_hba *hba,
 	return ret;
 }
 
-/**
- * ufshcd_query_descriptor - API function for sending descriptor requests
- * hba: per-adapter instance
- * opcode: attribute opcode
- * idn: attribute idn to access
- * index: index field
- * selector: selector field
- * desc_buf: the buffer that contains the descriptor
- * buf_len: length parameter passed to the device
- *
- * Returns 0 for success, non-zero in case of failure.
- * The buf_len parameter will contain, on return, the length parameter
- * received on the response.
- */
-static int ufshcd_query_descriptor(struct ufs_hba *hba,
+static int __ufshcd_query_descriptor(struct ufs_hba *hba,
 			enum query_opcode opcode, enum desc_idn idn, u8 index,
 			u8 selector, u8 *desc_buf, int *buf_len)
 {
@@ -1936,6 +1968,39 @@ out:
 }
 
 /**
+ * ufshcd_query_descriptor_retry - API function for sending descriptor
+ * requests
+ * hba: per-adapter instance
+ * opcode: attribute opcode
+ * idn: attribute idn to access
+ * index: index field
+ * selector: selector field
+ * desc_buf: the buffer that contains the descriptor
+ * buf_len: length parameter passed to the device
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ * The buf_len parameter will contain, on return, the length parameter
+ * received on the response.
+ */
+int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
+			enum query_opcode opcode, enum desc_idn idn, u8 index,
+			u8 selector, u8 *desc_buf, int *buf_len)
+{
+	int err;
+	int retries;
+
+	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
+						selector, desc_buf, buf_len);
+		if (!err || err == -EINVAL)
+			break;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
+
+/**
  * ufshcd_read_desc_param - read the specified descriptor parameter
  * @hba: Pointer to adapter instance
  * @desc_id: descriptor idn value
@@ -1977,9 +2042,9 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
 		return -ENOMEM;
 	}
 
-	ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
-				      desc_id, desc_index, 0, desc_buf,
-				      &buff_len);
+	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+					desc_id, desc_index, 0, desc_buf,
+					&buff_len);
 
 	if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
 	    (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
@@ -2017,6 +2082,82 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
 	return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
 }
 
+int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
+{
+	return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
+}
+EXPORT_SYMBOL(ufshcd_read_device_desc);
+
+/**
+ * ufshcd_read_string_desc - read string descriptor
+ * @hba: pointer to adapter instance
+ * @desc_index: descriptor index
+ * @buf: pointer to buffer where descriptor would be read
+ * @size: size of buf
+ * @ascii: if true convert from unicode to ascii characters
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
+				u32 size, bool ascii)
+{
+	int err = 0;
+
+	err = ufshcd_read_desc(hba,
+				QUERY_DESC_IDN_STRING, desc_index, buf, size);
+
+	if (err) {
+		dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
+			__func__, QUERY_REQ_RETRIES, err);
+		goto out;
+	}
+
+	if (ascii) {
+		int desc_len;
+		int ascii_len;
+		int i;
+		char *buff_ascii;
+
+		desc_len = buf[0];
+		/* remove header and divide by 2 to move from UTF16 to UTF8 */
+		ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
+		if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
+			dev_err(hba->dev, "%s: buffer allocated size is too small\n",
+				__func__);
+			err = -ENOMEM;
+			goto out;
+		}
+
+		buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
+		if (!buff_ascii) {
+			err = -ENOMEM;
+			goto out_free_buff;
+		}
+
+		/*
+		 * the descriptor contains string in UTF16 format
+		 * we need to convert to utf-8 so it can be displayed
+		 */
+		utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
+				desc_len - QUERY_DESC_HDR_SIZE,
+				UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
+
+		/* replace non-printable or non-ASCII characters with spaces */
+		for (i = 0; i < ascii_len; i++)
+			ufshcd_remove_non_printable(&buff_ascii[i]);
+
+		memset(buf + QUERY_DESC_HDR_SIZE, 0,
+		       size - QUERY_DESC_HDR_SIZE);
+		memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
+		buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
+out_free_buff:
+		kfree(buff_ascii);
+	}
+out:
+	return err;
+}
+EXPORT_SYMBOL(ufshcd_read_string_desc);
+
 /**
  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
  * @hba: Pointer to adapter instance
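
A usage sketch for the two new descriptor helpers: the device descriptor byte
at DEVICE_DESC_PARAM_PRDCT_NAME holds the index of the product-name string
descriptor, which can then be fetched in ASCII form (buffer sizing and error
handling here are illustrative):

	u8 dev_desc[QUERY_DESC_MAX_SIZE];
	u8 name[QUERY_DESC_MAX_SIZE];
	int err;

	err = ufshcd_read_device_desc(hba, dev_desc, QUERY_DESC_MAX_SIZE);
	if (!err)
		err = ufshcd_read_string_desc(hba,
				dev_desc[DEVICE_DESC_PARAM_PRDCT_NAME],
				name, QUERY_DESC_MAX_SIZE, true /* ascii */);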
@@ -2814,6 +2955,23 @@ out:
 }
 
 /**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ * @can_sleep: perform sleep or just spin
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
+{
+	int err;
+
+	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
+	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
+					CONTROLLER_ENABLE, CONTROLLER_DISABLE,
+					10, 1, can_sleep);
+	if (err)
+		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
+}
+
+/**
  * ufshcd_hba_enable - initialize the controller
  * @hba: per adapter instance
  *
@@ -2833,18 +2991,9 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
 	 * development and testing of this driver. msleep can be changed to
 	 * mdelay and retry count can be reduced based on the controller.
 	 */
-	if (!ufshcd_is_hba_active(hba)) {
-
+	if (!ufshcd_is_hba_active(hba))
 		/* change controller state to "reset state" */
-		ufshcd_hba_stop(hba);
-
-		/*
-		 * This delay is based on the testing done with UFS host
-		 * controller FPGA. The delay can be changed based on the
-		 * host controller used.
-		 */
-		msleep(5);
-	}
+		ufshcd_hba_stop(hba, true);
 
 	/* UniPro link is disabled at this point */
 	ufshcd_set_link_off(hba);
@@ -3365,31 +3514,18 @@ static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 }
 
 /**
- * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * __ufshcd_transfer_req_compl - handle SCSI and query command completion
  * @hba: per adapter instance
+ * @completed_reqs: requests to complete
  */
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+					unsigned long completed_reqs)
 {
 	struct ufshcd_lrb *lrbp;
 	struct scsi_cmnd *cmd;
-	unsigned long completed_reqs;
-	u32 tr_doorbell;
 	int result;
 	int index;
 
-	/* Resetting interrupt aggregation counters first and reading the
-	 * DOOR_BELL afterward allows us to handle all the completed requests.
-	 * In order to prevent other interrupts starvation the DB is read once
-	 * after reset. The down side of this solution is the possibility of
-	 * false interrupt if device completes another request after resetting
-	 * aggregation and before reading the DB.
-	 */
-	if (ufshcd_is_intr_aggr_allowed(hba))
-		ufshcd_reset_intr_aggr(hba);
-
-	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
-	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
-
 	for_each_set_bit(index, &completed_reqs, hba->nutrs) {
 		lrbp = &hba->lrb[index];
 		cmd = lrbp->cmd;
@@ -3419,6 +3555,31 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
 }
 
 /**
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ */
+static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+{
+	unsigned long completed_reqs;
+	u32 tr_doorbell;
+
+	/* Resetting interrupt aggregation counters first and reading the
+	 * DOOR_BELL afterward allows us to handle all the completed requests.
+	 * In order to prevent other interrupts starvation the DB is read once
+	 * after reset. The down side of this solution is the possibility of
+	 * false interrupt if device completes another request after resetting
+	 * aggregation and before reading the DB.
+	 */
+	if (ufshcd_is_intr_aggr_allowed(hba))
+		ufshcd_reset_intr_aggr(hba);
+
+	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+
+	__ufshcd_transfer_req_compl(hba, completed_reqs);
+}
+
+/**
  * ufshcd_disable_ee - disable exception event
  * @hba: per-adapter instance
  * @mask: exception event to disable
@@ -3630,7 +3791,7 @@ out:
  */
 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
 {
-	return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
+	return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
 }
 
 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
@@ -3639,6 +3800,43 @@ static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
 			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
 }
 
+static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
+{
+	int err;
+	u32 curr_status = 0;
+
+	if (hba->is_urgent_bkops_lvl_checked)
+		goto enable_auto_bkops;
+
+	err = ufshcd_get_bkops_status(hba, &curr_status);
+	if (err) {
+		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+			__func__, err);
+		goto out;
+	}
+
+	/*
+	 * We are seeing that some devices are raising the urgent bkops
+	 * exception events even when BKOPS status doesn't indicate performance
+	 * impacted or critical. Handle these devices by determining their
+	 * urgent bkops status at runtime.
+	 */
+	if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
+		dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
+			__func__, curr_status);
+		/* update the current status as the urgent bkops level */
+		hba->urgent_bkops_lvl = curr_status;
+		hba->is_urgent_bkops_lvl_checked = true;
+	}
+
+enable_auto_bkops:
+	err = ufshcd_enable_auto_bkops(hba);
+out:
+	if (err < 0)
+		dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
+			__func__, err);
+}
+
 /**
  * ufshcd_exception_event_handler - handle exceptions raised by device
  * @work: pointer to work data
@@ -3662,17 +3860,95 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
 	}
 
 	status &= hba->ee_ctrl_mask;
-	if (status & MASK_EE_URGENT_BKOPS) {
-		err = ufshcd_urgent_bkops(hba);
-		if (err < 0)
-			dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
-					__func__, err);
-	}
+
+	if (status & MASK_EE_URGENT_BKOPS)
+		ufshcd_bkops_exception_event_handler(hba);
+
 out:
 	pm_runtime_put_sync(hba->dev);
 	return;
 }
 
+/* Complete requests that have door-bell cleared */
+static void ufshcd_complete_requests(struct ufs_hba *hba)
+{
+	ufshcd_transfer_req_compl(hba);
+	ufshcd_tmc_handler(hba);
+}
+
+/**
+ * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
+ *				required to recover from the DL NAC errors or not.
+ * @hba: per-adapter instance
+ *
+ * Returns true if error handling is required, false otherwise
+ */
+static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
+{
+	unsigned long flags;
+	bool err_handling = true;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	/*
+	 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
+	 * device fatal error and/or DL NAC & REPLAY timeout errors.
+	 */
+	if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
+		goto out;
+
+	if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
+	    ((hba->saved_err & UIC_ERROR) &&
+	    (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
+		goto out;
+
+	if ((hba->saved_err & UIC_ERROR) &&
+	    (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
+		int err;
+		/*
+		 * wait for 50ms to see if we can get any other errors or not.
+		 */
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		msleep(50);
+		spin_lock_irqsave(hba->host->host_lock, flags);
+
+		/*
+		 * now check if we have got any other severe errors other than
+		 * DL NAC error?
+		 */
+		if ((hba->saved_err & INT_FATAL_ERRORS) ||
+		    ((hba->saved_err & UIC_ERROR) &&
+		    (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
+			goto out;
+
+		/*
+		 * As DL NAC is the only error received so far, send out NOP
+		 * command to confirm if link is still active or not.
+		 *   - If we don't get any response then do error recovery.
+		 *   - If we get response then clear the DL NAC error bit.
+		 */
+
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		err = ufshcd_verify_dev_init(hba);
+		spin_lock_irqsave(hba->host->host_lock, flags);
+
+		if (err)
+			goto out;
+
+		/* Link seems to be alive hence ignore the DL NAC errors */
+		if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
+			hba->saved_err &= ~UIC_ERROR;
+		/* clear NAC error */
+		hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+		if (!hba->saved_uic_err) {
+			err_handling = false;
+			goto out;
+		}
+	}
+out:
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	return err_handling;
+}
+
 /**
  * ufshcd_err_handler - handle UFS errors that require s/w attention
  * @work: pointer to work structure
@@ -3685,6 +3961,7 @@ static void ufshcd_err_handler(struct work_struct *work)
 	u32 err_tm = 0;
 	int err = 0;
 	int tag;
+	bool needs_reset = false;
 
 	hba = container_of(work, struct ufs_hba, eh_work);
 
@@ -3692,40 +3969,86 @@ static void ufshcd_err_handler(struct work_struct *work)
3692 ufshcd_hold(hba, false); 3969 ufshcd_hold(hba, false);
3693 3970
3694 spin_lock_irqsave(hba->host->host_lock, flags); 3971 spin_lock_irqsave(hba->host->host_lock, flags);
3695 if (hba->ufshcd_state == UFSHCD_STATE_RESET) { 3972 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
3696 spin_unlock_irqrestore(hba->host->host_lock, flags);
3697 goto out; 3973 goto out;
3698 }
3699 3974
3700 hba->ufshcd_state = UFSHCD_STATE_RESET; 3975 hba->ufshcd_state = UFSHCD_STATE_RESET;
3701 ufshcd_set_eh_in_progress(hba); 3976 ufshcd_set_eh_in_progress(hba);
3702 3977
3703 /* Complete requests that have door-bell cleared by h/w */ 3978 /* Complete requests that have door-bell cleared by h/w */
3704 ufshcd_transfer_req_compl(hba); 3979 ufshcd_complete_requests(hba);
3705 ufshcd_tmc_handler(hba); 3980
3706 spin_unlock_irqrestore(hba->host->host_lock, flags); 3981 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
3982 bool ret;
3707 3983
3984 spin_unlock_irqrestore(hba->host->host_lock, flags);
3985 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
3986 ret = ufshcd_quirk_dl_nac_errors(hba);
3987 spin_lock_irqsave(hba->host->host_lock, flags);
3988 if (!ret)
3989 goto skip_err_handling;
3990 }
3991 if ((hba->saved_err & INT_FATAL_ERRORS) ||
3992 ((hba->saved_err & UIC_ERROR) &&
3993 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
3994 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
3995 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
3996 needs_reset = true;
3997
3998 /*
3999 * if host reset is required then skip clearing the pending
4000 * transfers forcefully because they will automatically get
4001 * cleared after link startup.
4002 */
4003 if (needs_reset)
4004 goto skip_pending_xfer_clear;
4005
4006 /* release lock as clear command might sleep */
4007 spin_unlock_irqrestore(hba->host->host_lock, flags);
3708 /* Clear pending transfer requests */ 4008 /* Clear pending transfer requests */
3709 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) 4009 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
3710 if (ufshcd_clear_cmd(hba, tag)) 4010 if (ufshcd_clear_cmd(hba, tag)) {
3711 err_xfer |= 1 << tag; 4011 err_xfer = true;
4012 goto lock_skip_pending_xfer_clear;
4013 }
4014 }
3712 4015
3713 /* Clear pending task management requests */ 4016 /* Clear pending task management requests */
3714 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) 4017 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
3715 if (ufshcd_clear_tm_cmd(hba, tag)) 4018 if (ufshcd_clear_tm_cmd(hba, tag)) {
3716 err_tm |= 1 << tag; 4019 err_tm = true;
4020 goto lock_skip_pending_xfer_clear;
4021 }
4022 }
3717 4023
3718 /* Complete the requests that are cleared by s/w */ 4024lock_skip_pending_xfer_clear:
3719 spin_lock_irqsave(hba->host->host_lock, flags); 4025 spin_lock_irqsave(hba->host->host_lock, flags);
3720 ufshcd_transfer_req_compl(hba);
3721 ufshcd_tmc_handler(hba);
3722 spin_unlock_irqrestore(hba->host->host_lock, flags);
3723 4026
4027 /* Complete the requests that are cleared by s/w */
4028 ufshcd_complete_requests(hba);
4029
4030 if (err_xfer || err_tm)
4031 needs_reset = true;
4032
4033skip_pending_xfer_clear:
3724 /* Fatal errors need reset */ 4034 /* Fatal errors need reset */
3725 if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) || 4035 if (needs_reset) {
3726 ((hba->saved_err & UIC_ERROR) && 4036 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
3727 (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) { 4037
4038 /*
4039 * ufshcd_reset_and_restore() does the link reinitialization
 4040 * which will need at least one empty doorbell slot to send the
 4041 * device management commands (NOP and query commands).
 4042 * If no slot is empty at this moment then forcefully free up the
 4043 * last slot.
4044 */
4045 if (hba->outstanding_reqs == max_doorbells)
4046 __ufshcd_transfer_req_compl(hba,
4047 (1UL << (hba->nutrs - 1)));
4048
4049 spin_unlock_irqrestore(hba->host->host_lock, flags);
3728 err = ufshcd_reset_and_restore(hba); 4050 err = ufshcd_reset_and_restore(hba);
4051 spin_lock_irqsave(hba->host->host_lock, flags);
3729 if (err) { 4052 if (err) {
3730 dev_err(hba->dev, "%s: reset and restore failed\n", 4053 dev_err(hba->dev, "%s: reset and restore failed\n",
3731 __func__); 4054 __func__);
@@ -3739,9 +4062,19 @@ static void ufshcd_err_handler(struct work_struct *work)
3739 hba->saved_err = 0; 4062 hba->saved_err = 0;
3740 hba->saved_uic_err = 0; 4063 hba->saved_uic_err = 0;
3741 } 4064 }
4065
4066skip_err_handling:
4067 if (!needs_reset) {
4068 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
4069 if (hba->saved_err || hba->saved_uic_err)
4070 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
4071 __func__, hba->saved_err, hba->saved_uic_err);
4072 }
4073
3742 ufshcd_clear_eh_in_progress(hba); 4074 ufshcd_clear_eh_in_progress(hba);
3743 4075
3744out: 4076out:
4077 spin_unlock_irqrestore(hba->host->host_lock, flags);
3745 scsi_unblock_requests(hba->host); 4078 scsi_unblock_requests(hba->host);
3746 ufshcd_release(hba); 4079 ufshcd_release(hba);
3747 pm_runtime_put_sync(hba->dev); 4080 pm_runtime_put_sync(hba->dev);
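
A note on the rework above: the handler now folds its reset decision into a
single needs_reset flag, set either by fatal bits in the interrupt status or
by a UIC error whose data-link bits require link re-initialization. Below is
a standalone model of that classification, with illustrative mask values
standing in for the real INT_FATAL_ERRORS / UFSHCD_UIC_DL_* definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the UFSHCI/UFSHCD mask definitions. */
#define INT_FATAL_ERRORS                 0x00070000u
#define UIC_ERROR                        0x00000004u
#define UFSHCD_UIC_DL_PA_INIT_ERROR      0x00000001u
#define UFSHCD_UIC_DL_NAC_RECEIVED_ERROR 0x00000002u
#define UFSHCD_UIC_DL_TCx_REPLAY_ERROR   0x00000004u

/* Mirrors the needs_reset test in ufshcd_err_handler(). */
static bool needs_full_reset(uint32_t saved_err, uint32_t saved_uic_err)
{
    return (saved_err & INT_FATAL_ERRORS) ||
           ((saved_err & UIC_ERROR) &&
            (saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
                              UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
                              UFSHCD_UIC_DL_TCx_REPLAY_ERROR)));
}

int main(void)
{
    /* A plain UIC error alone does not force a reset ... */
    printf("%d\n", needs_full_reset(UIC_ERROR, 0)); /* prints 0 */
    /* ... but a DL PA_INIT error reported through it does. */
    printf("%d\n", needs_full_reset(UIC_ERROR,
                                    UFSHCD_UIC_DL_PA_INIT_ERROR)); /* 1 */
    return 0;
}

The max_doorbells branch exists because ufshcd_reset_and_restore() needs at
least one free doorbell slot for its NOP/query device-management commands;
when all hba->nutrs slots are occupied, the handler forcibly completes the
last one before resetting.
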
@@ -3759,6 +4092,14 @@ static void ufshcd_update_uic_error(struct ufs_hba *hba)
3759 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); 4092 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
3760 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) 4093 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
3761 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; 4094 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
4095 else if (hba->dev_quirks &
4096 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
4097 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
4098 hba->uic_error |=
4099 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
4100 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
4101 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
4102 }
3762 4103
3763 /* UIC NL/TL/DME errors need software retry */ 4104
3764 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); 4105 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
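
The decode order above matters: PA_INIT always wins, and the NAC/TCx replay
bits are only latched for devices carrying the DL NAC recovery quirk. A
minimal sketch of that decode, reusing the register bit values from the
ufshci.h hunk below and illustrative values for the sticky UFSHCD_UIC_DL_*
bits:

#include <stdbool.h>
#include <stdint.h>

#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT            0x2000u
#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED       0x0001u
#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002u

/* Illustrative sticky-bit values; the driver defines its own. */
#define UFSHCD_UIC_DL_PA_INIT_ERROR      0x1u
#define UFSHCD_UIC_DL_NAC_RECEIVED_ERROR 0x2u
#define UFSHCD_UIC_DL_TCx_REPLAY_ERROR   0x4u

static uint32_t decode_dl_error(uint32_t reg, bool has_nac_quirk)
{
    uint32_t uic_error = 0;

    if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
        uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
    else if (has_nac_quirk) {
        if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
            uic_error |= UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
        else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
            uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
    }
    return uic_error;
}
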
@@ -3796,15 +4137,18 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
3796 } 4137 }
3797 4138
3798 if (queue_eh_work) { 4139 if (queue_eh_work) {
4140 /*
 4141 * update the transfer error masks to sticky bits; do this
 4142 * irrespective of the current ufshcd_state.
4143 */
4144 hba->saved_err |= hba->errors;
4145 hba->saved_uic_err |= hba->uic_error;
4146
3799 /* handle fatal errors only when link is functional */ 4147 /* handle fatal errors only when link is functional */
3800 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) { 4148 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
3801 /* block commands from scsi mid-layer */ 4149 /* block commands from scsi mid-layer */
3802 scsi_block_requests(hba->host); 4150 scsi_block_requests(hba->host);
3803 4151
3804 /* transfer error masks to sticky bits */
3805 hba->saved_err |= hba->errors;
3806 hba->saved_uic_err |= hba->uic_error;
3807
3808 hba->ufshcd_state = UFSHCD_STATE_ERROR; 4152 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3809 schedule_work(&hba->eh_work); 4153 schedule_work(&hba->eh_work);
3810 } 4154 }
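
Moving the saved_err/saved_uic_err accumulation ahead of the state check
means errors raised while a recovery is already in flight are no longer
dropped; they stay sticky until eh_work consumes them. A minimal model of
that producer/consumer split:

#include <stdint.h>

struct hba_errs {
    uint32_t errors, uic_error;        /* latest IRQ snapshot */
    uint32_t saved_err, saved_uic_err; /* sticky, owned by eh_work */
};

/* IRQ path: accumulate unconditionally, even mid-recovery. */
static void save_errors(struct hba_errs *e)
{
    e->saved_err     |= e->errors;
    e->saved_uic_err |= e->uic_error;
}

/* eh_work: clear only after a successful reset-and-restore. */
static void clear_saved_errors(struct hba_errs *e)
{
    e->saved_err = 0;
    e->saved_uic_err = 0;
}
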
@@ -3897,7 +4241,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
3897 /* poll for max. 1 sec to clear door bell register by h/w */ 4241 /* poll for max. 1 sec to clear door bell register by h/w */
3898 err = ufshcd_wait_for_register(hba, 4242 err = ufshcd_wait_for_register(hba,
3899 REG_UTP_TASK_REQ_DOOR_BELL, 4243 REG_UTP_TASK_REQ_DOOR_BELL,
3900 mask, 0, 1000, 1000); 4244 mask, 0, 1000, 1000, true);
3901out: 4245out:
3902 return err; 4246 return err;
3903} 4247}
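
The extra boolean on ufshcd_wait_for_register() (full prototype in the
ufshcd.h hunk further down) lets process-context callers sleep between polls
while atomic callers keep busy-waiting. A userspace sketch of the polling
contract, with a stubbed register read standing in for ufshcd_readl():

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

/* Stub standing in for ufshcd_readl(). */
static uint32_t read_reg(uint32_t reg)
{
    (void)reg;
    return 0; /* a real implementation would read MMIO */
}

/*
 * Poll until (reg & mask) == (val & mask) or timeout_ms expires.
 * can_sleep picks usleep() for process context; an atomic caller
 * would spin with udelay()/cpu_relax() instead.
 */
static int wait_for_register(uint32_t reg, uint32_t mask, uint32_t val,
                             unsigned long interval_us,
                             unsigned long timeout_ms, bool can_sleep)
{
    unsigned long waited_us = 0;

    val &= mask; /* ignore bits outside the mask, as the driver does */
    while ((read_reg(reg) & mask) != val) {
        if (waited_us >= timeout_ms * 1000)
            return -1; /* -ETIMEDOUT in the driver */
        if (can_sleep)
            usleep(interval_us);
        waited_us += interval_us;
    }
    return 0;
}
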
@@ -4179,7 +4523,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
4179 4523
4180 /* Reset the host controller */ 4524 /* Reset the host controller */
4181 spin_lock_irqsave(hba->host->host_lock, flags); 4525 spin_lock_irqsave(hba->host->host_lock, flags);
4182 ufshcd_hba_stop(hba); 4526 ufshcd_hba_stop(hba, false);
4183 spin_unlock_irqrestore(hba->host->host_lock, flags); 4527 spin_unlock_irqrestore(hba->host->host_lock, flags);
4184 4528
4185 err = ufshcd_hba_enable(hba); 4529 err = ufshcd_hba_enable(hba);
@@ -4466,6 +4810,164 @@ out:
4466 return ret; 4810 return ret;
4467} 4811}
4468 4812
4813static int ufs_get_device_info(struct ufs_hba *hba,
4814 struct ufs_device_info *card_data)
4815{
4816 int err;
4817 u8 model_index;
4818 u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
4819 u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
4820
4821 err = ufshcd_read_device_desc(hba, desc_buf,
4822 QUERY_DESC_DEVICE_MAX_SIZE);
4823 if (err) {
4824 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
4825 __func__, err);
4826 goto out;
4827 }
4828
4829 /*
 4830 * the vendor ID (wManufacturerID) and bank index are stored in
 4831 * big-endian format in the device descriptor
4832 */
4833 card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
4834 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
4835
4836 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
4837
4838 err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
4839 QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
4840 if (err) {
4841 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
4842 __func__, err);
4843 goto out;
4844 }
4845
4846 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
4847 strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
4848 min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
4849 MAX_MODEL_LEN));
4850
4851 /* Null terminate the model string */
4852 card_data->model[MAX_MODEL_LEN] = '\0';
4853
4854out:
4855 return err;
4856}
4857
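
The descriptor parsing above has two subtleties: the 16-bit manufacturer ID
is stored high byte first, and the model-string copy is clamped both by the
descriptor's own length byte and by the driver's MAX_MODEL_LEN buffer. A
standalone sketch, with illustrative offsets in place of the
DEVICE_DESC_PARAM_* / QUERY_DESC_* constants:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the UFS descriptor layout constants. */
#define DEVICE_DESC_PARAM_MANF_ID 0x18
#define QUERY_DESC_HDR_SIZE       2
#define QUERY_DESC_LENGTH_OFFSET  0
#define MAX_MODEL_LEN             16

/* Descriptor fields are big-endian: high byte first. */
static uint16_t manufacturer_id(const uint8_t *desc)
{
    return (uint16_t)(desc[DEVICE_DESC_PARAM_MANF_ID] << 8 |
                      desc[DEVICE_DESC_PARAM_MANF_ID + 1]);
}

/* Copy the model name out of a string descriptor, clamped to the
 * descriptor's own length byte and to a MAX_MODEL_LEN + 1 buffer. */
static void copy_model(char model[MAX_MODEL_LEN + 1], const uint8_t *str_desc)
{
    size_t len = str_desc[QUERY_DESC_LENGTH_OFFSET];

    if (len > MAX_MODEL_LEN)
        len = MAX_MODEL_LEN;
    /* the payload follows the 2-byte descriptor header */
    memcpy(model, str_desc + QUERY_DESC_HDR_SIZE, len);
    model[len] = '\0';
}

int main(void)
{
    uint8_t desc[0x40] = {0};

    desc[DEVICE_DESC_PARAM_MANF_ID] = 0x01;     /* high byte */
    desc[DEVICE_DESC_PARAM_MANF_ID + 1] = 0xCE; /* low byte  */
    printf("0x%04x\n", manufacturer_id(desc));  /* prints 0x01ce */
    return 0;
}
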
4858void ufs_advertise_fixup_device(struct ufs_hba *hba)
4859{
4860 int err;
4861 struct ufs_dev_fix *f;
4862 struct ufs_device_info card_data;
4863
4864 card_data.wmanufacturerid = 0;
4865
4866 err = ufs_get_device_info(hba, &card_data);
4867 if (err) {
4868 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
4869 __func__, err);
4870 return;
4871 }
4872
4873 for (f = ufs_fixups; f->quirk; f++) {
4874 if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
4875 (f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
4876 (STR_PRFX_EQUAL(f->card.model, card_data.model) ||
4877 !strcmp(f->card.model, UFS_ANY_MODEL)))
4878 hba->dev_quirks |= f->quirk;
4879 }
4880}
4881
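
The fixup walk treats UFS_ANY_VENDOR and UFS_ANY_MODEL as wildcards and
STR_PRFX_EQUAL as a prefix match, so a single table row can cover a whole
device family; every matching row contributes its quirk bits. A compilable
model of that loop with a hypothetical table (real entries live in the new
ufs_quirks.h):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define UFS_ANY_VENDOR 0xFFFF
#define UFS_ANY_MODEL  "ANY_MODEL"

struct ufs_dev_fix {
    uint16_t    wmanufacturerid;
    const char *model;
    uint32_t    quirk;
};

/* Hypothetical table; a quirk of 0 terminates the walk. */
static const struct ufs_dev_fix fixups[] = {
    { 0x1234, "EXAMPLE-DEV", 0x1 },
    { UFS_ANY_VENDOR, UFS_ANY_MODEL, 0x2 },
    { 0, NULL, 0 },
};

/* Prefix compare, like STR_PRFX_EQUAL: an entry may name a family. */
static int prefix_equal(const char *pattern, const char *model)
{
    return strncmp(pattern, model, strlen(pattern)) == 0;
}

static uint32_t collect_quirks(uint16_t vendor, const char *model)
{
    uint32_t quirks = 0;
    const struct ufs_dev_fix *f;

    for (f = fixups; f->quirk; f++)
        if ((f->wmanufacturerid == vendor ||
             f->wmanufacturerid == UFS_ANY_VENDOR) &&
            (!strcmp(f->model, UFS_ANY_MODEL) ||
             prefix_equal(f->model, model)))
            quirks |= f->quirk;
    return quirks;
}

int main(void)
{
    /* Both rows match, so both quirk bits are set. */
    printf("0x%x\n", collect_quirks(0x1234, "EXAMPLE-DEV-128G")); /* 0x3 */
    return 0;
}
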
4882/**
4883 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
4884 * @hba: per-adapter instance
4885 *
4886 * PA_TActivate parameter can be tuned manually if UniPro version is less than
 4887 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
4888 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
4889 * the hibern8 exit latency.
4890 *
4891 * Returns zero on success, non-zero error value on failure.
4892 */
4893static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
4894{
4895 int ret = 0;
4896 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
4897
4898 ret = ufshcd_dme_peer_get(hba,
4899 UIC_ARG_MIB_SEL(
4900 RX_MIN_ACTIVATETIME_CAPABILITY,
4901 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
4902 &peer_rx_min_activatetime);
4903 if (ret)
4904 goto out;
4905
4906 /* make sure proper unit conversion is applied */
4907 tuned_pa_tactivate =
4908 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
4909 / PA_TACTIVATE_TIME_UNIT_US);
4910 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
4911 tuned_pa_tactivate);
4912
4913out:
4914 return ret;
4915}
4916
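
The conversion in ufshcd_tune_pa_tactivate() bridges two granularities:
RX_MIN_ACTIVATETIME_CAPABILITY counts in 100us steps while PA_TACTIVATE
counts in 10us steps (both unit defines appear in the unipro.h hunk below),
so the peer's minimum is effectively multiplied by ten. A worked example:

#include <stdint.h>
#include <stdio.h>

#define RX_MIN_ACTIVATETIME_UNIT_US 100 /* peer capability granularity */
#define PA_TACTIVATE_TIME_UNIT_US   10  /* PA_TACTIVATE granularity */

static uint32_t tuned_pa_tactivate(uint32_t peer_rx_min_activatetime)
{
    return peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US /
           PA_TACTIVATE_TIME_UNIT_US;
}

int main(void)
{
    /* A peer minimum of 8 means 800us, i.e. PA_TACTIVATE = 80. */
    printf("%u\n", tuned_pa_tactivate(8)); /* prints 80 */
    return 0;
}
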
4917/**
4918 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
4919 * @hba: per-adapter instance
4920 *
4921 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
 4922 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
4923 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
4924 * This optimal value can help reduce the hibern8 exit latency.
4925 *
4926 * Returns zero on success, non-zero error value on failure.
4927 */
4928static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
4929{
4930 int ret = 0;
4931 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
4932 u32 max_hibern8_time, tuned_pa_hibern8time;
4933
4934 ret = ufshcd_dme_get(hba,
4935 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
4936 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
4937 &local_tx_hibern8_time_cap);
4938 if (ret)
4939 goto out;
4940
4941 ret = ufshcd_dme_peer_get(hba,
4942 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
4943 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
4944 &peer_rx_hibern8_time_cap);
4945 if (ret)
4946 goto out;
4947
4948 max_hibern8_time = max(local_tx_hibern8_time_cap,
4949 peer_rx_hibern8_time_cap);
4950 /* make sure proper unit conversion is applied */
4951 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
4952 / PA_HIBERN8_TIME_UNIT_US);
4953 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
4954 tuned_pa_hibern8time);
4955out:
4956 return ret;
4957}
4958
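
Unlike the PA_TActivate case, both hibern8 capabilities and PA_Hibern8Time
use the same 100us step (HIBERN8TIME_UNIT_US and PA_HIBERN8_TIME_UNIT_US are
both 100 in the unipro.h hunk below), so the conversion reduces to taking
the larger capability: for example, a local TX capability of 40 (4ms)
against a peer RX capability of 30 (3ms) yields PA_Hibern8Time = 40.
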
4959static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
4960{
4961 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
4962 ufshcd_tune_pa_tactivate(hba);
4963 ufshcd_tune_pa_hibern8time(hba);
4964 }
4965
4966 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
4967 /* set 1ms timeout for PA_TACTIVATE */
4968 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
4969}
4970
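
ufshcd_is_unipro_pa_params_tuning_req() gates both helpers; per their
kerneldoc, manual tuning only applies to pre-1.61 UniPro stacks. A plausible
sketch of the predicate, assuming it compares the local version (via the
newly exported ufshcd_get_local_unipro_ver()) against the UFS_UNIPRO_VER_1_6
enumerator from unipro.h — the exact check is an assumption here:

#include <stdbool.h>

enum ufs_unipro_ver {
    UFS_UNIPRO_VER_RESERVED = 0,
    UFS_UNIPRO_VER_1_40 = 1,
    UFS_UNIPRO_VER_1_41 = 2,
    UFS_UNIPRO_VER_1_6  = 3,
};

/* Assumed shape of the gate: tune PA_TActivate/PA_Hibern8Time only when
 * the local UniPro stack predates 1.6. */
static bool pa_params_tuning_required(enum ufs_unipro_ver local_ver)
{
    return local_ver < UFS_UNIPRO_VER_1_6;
}
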
4469/** 4971/**
4470 * ufshcd_probe_hba - probe hba to detect device and initialize 4972 * ufshcd_probe_hba - probe hba to detect device and initialize
4471 * @hba: per-adapter instance 4973 * @hba: per-adapter instance
@@ -4482,6 +4984,10 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
4482 4984
4483 ufshcd_init_pwr_info(hba); 4985 ufshcd_init_pwr_info(hba);
4484 4986
4987 /* set the default level for urgent bkops */
4988 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
4989 hba->is_urgent_bkops_lvl_checked = false;
4990
4485 /* UniPro link is active now */ 4991 /* UniPro link is active now */
4486 ufshcd_set_link_active(hba); 4992 ufshcd_set_link_active(hba);
4487 4993
@@ -4493,6 +4999,14 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
4493 if (ret) 4999 if (ret)
4494 goto out; 5000 goto out;
4495 5001
5002 ufs_advertise_fixup_device(hba);
5003 ufshcd_tune_unipro_params(hba);
5004
5005 ret = ufshcd_set_vccq_rail_unused(hba,
5006 (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
5007 if (ret)
5008 goto out;
5009
4496 /* UFS device is also active now */ 5010 /* UFS device is also active now */
4497 ufshcd_set_ufs_dev_active(hba); 5011 ufshcd_set_ufs_dev_active(hba);
4498 ufshcd_force_reset_auto_bkops(hba); 5012 ufshcd_force_reset_auto_bkops(hba);
@@ -4567,6 +5081,41 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
4567 ufshcd_probe_hba(hba); 5081 ufshcd_probe_hba(hba);
4568} 5082}
4569 5083
5084static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
5085{
5086 unsigned long flags;
5087 struct Scsi_Host *host;
5088 struct ufs_hba *hba;
5089 int index;
5090 bool found = false;
5091
5092 if (!scmd || !scmd->device || !scmd->device->host)
5093 return BLK_EH_NOT_HANDLED;
5094
5095 host = scmd->device->host;
5096 hba = shost_priv(host);
5097 if (!hba)
5098 return BLK_EH_NOT_HANDLED;
5099
5100 spin_lock_irqsave(host->host_lock, flags);
5101
5102 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
5103 if (hba->lrb[index].cmd == scmd) {
5104 found = true;
5105 break;
5106 }
5107 }
5108
5109 spin_unlock_irqrestore(host->host_lock, flags);
5110
5111 /*
5112 * Bypass SCSI error handling and reset the block layer timer if this
 5113 * SCSI command was not actually dispatched to the UFS driver; otherwise
 5114 * let the SCSI layer handle the error as usual.
5115 */
5116 return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
5117}
5118
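
The timeout hook's policy is simple: only commands that actually occupy a
doorbell slot get normal SCSI error handling, while anything still queued
above the driver just has its block-layer timer rearmed. A standalone model
of the bitmap lookup:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUTRS 32 /* transfer request slots, hba->nutrs in the driver */

struct lrb { const void *cmd; };

/* Return true iff scmd occupies one of the outstanding request slots. */
static bool cmd_is_outstanding(uint32_t outstanding_reqs,
                               const struct lrb lrb[NUTRS], const void *scmd)
{
    for (int tag = 0; tag < NUTRS; tag++)
        if ((outstanding_reqs & (1u << tag)) && lrb[tag].cmd == scmd)
            return true;
    return false;
}

int main(void)
{
    struct lrb lrb[NUTRS] = {0};
    int cmd;

    lrb[3].cmd = &cmd;
    /* found -> let SCSI EH run (BLK_EH_NOT_HANDLED in the driver);
     * not found -> just rearm the timer (BLK_EH_RESET_TIMER). */
    printf("%d\n", cmd_is_outstanding(1u << 3, lrb, &cmd)); /* prints 1 */
    return 0;
}
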
4570static struct scsi_host_template ufshcd_driver_template = { 5119static struct scsi_host_template ufshcd_driver_template = {
4571 .module = THIS_MODULE, 5120 .module = THIS_MODULE,
4572 .name = UFSHCD, 5121 .name = UFSHCD,
@@ -4579,6 +5128,7 @@ static struct scsi_host_template ufshcd_driver_template = {
4579 .eh_abort_handler = ufshcd_abort, 5128 .eh_abort_handler = ufshcd_abort,
4580 .eh_device_reset_handler = ufshcd_eh_device_reset_handler, 5129 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
4581 .eh_host_reset_handler = ufshcd_eh_host_reset_handler, 5130 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
5131 .eh_timed_out = ufshcd_eh_timed_out,
4582 .this_id = -1, 5132 .this_id = -1,
4583 .sg_tablesize = SG_ALL, 5133 .sg_tablesize = SG_ALL,
4584 .cmd_per_lun = UFSHCD_CMD_PER_LUN, 5134 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
@@ -4607,13 +5157,24 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
4607static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, 5157static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
4608 struct ufs_vreg *vreg) 5158 struct ufs_vreg *vreg)
4609{ 5159{
4610 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); 5160 if (!vreg)
5161 return 0;
5162 else if (vreg->unused)
5163 return 0;
5164 else
5165 return ufshcd_config_vreg_load(hba->dev, vreg,
5166 UFS_VREG_LPM_LOAD_UA);
4611} 5167}
4612 5168
4613static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, 5169static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
4614 struct ufs_vreg *vreg) 5170 struct ufs_vreg *vreg)
4615{ 5171{
4616 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); 5172 if (!vreg)
5173 return 0;
5174 else if (vreg->unused)
5175 return 0;
5176 else
5177 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
4617} 5178}
4618 5179
4619static int ufshcd_config_vreg(struct device *dev, 5180static int ufshcd_config_vreg(struct device *dev,
@@ -4648,7 +5209,9 @@ static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
4648{ 5209{
4649 int ret = 0; 5210 int ret = 0;
4650 5211
4651 if (!vreg || vreg->enabled) 5212 if (!vreg)
5213 goto out;
5214 else if (vreg->enabled || vreg->unused)
4652 goto out; 5215 goto out;
4653 5216
4654 ret = ufshcd_config_vreg(dev, vreg, true); 5217 ret = ufshcd_config_vreg(dev, vreg, true);
@@ -4668,7 +5231,9 @@ static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
4668{ 5231{
4669 int ret = 0; 5232 int ret = 0;
4670 5233
4671 if (!vreg || !vreg->enabled) 5234 if (!vreg)
5235 goto out;
5236 else if (!vreg->enabled || vreg->unused)
4672 goto out; 5237 goto out;
4673 5238
4674 ret = regulator_disable(vreg->reg); 5239 ret = regulator_disable(vreg->reg);
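
All four regulator helpers now share one guard ladder: a NULL rail or a rail
marked unused is a successful no-op, and only live rails reach the regulator
framework. Condensed into a single short-circuit test (a sketch; the stub
stands in for the real ufshcd_config_vreg_load() worker):

#include <stddef.h>

struct ufs_vreg { int unused; int enabled; int max_uA; };

static int config_vreg_load(struct ufs_vreg *vreg, int ua)
{
    (void)vreg; (void)ua;
    return 0; /* stub for the regulator framework call */
}

/* A NULL rail or one flagged unused is a successful no-op. */
static int config_vreg_hpm(struct ufs_vreg *vreg)
{
    if (!vreg || vreg->unused)
        return 0;
    return config_vreg_load(vreg, vreg->max_uA);
}

The driver spells this as an if/else-if ladder, which compiles to the same
thing; the important invariant is that the unused flag is checked before any
regulator call, matching ufshcd_set_vccq_rail_unused() later in this patch.
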
@@ -4774,6 +5339,36 @@ static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
4774 return 0; 5339 return 0;
4775} 5340}
4776 5341
5342static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
5343{
5344 int ret = 0;
5345 struct ufs_vreg_info *info = &hba->vreg_info;
5346
5347 if (!info)
5348 goto out;
5349 else if (!info->vccq)
5350 goto out;
5351
5352 if (unused) {
5353 /* shut off the rail here */
5354 ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
5355 /*
5356 * Mark this rail as no longer used, so it doesn't get enabled
 5357 * later by mistake.
5358 */
5359 if (!ret)
5360 info->vccq->unused = true;
5361 } else {
5362 /*
 5363 * the rail should already be enabled here, so just make sure
 5364 * that the unused flag is cleared.
5365 */
5366 info->vccq->unused = false;
5367 }
5368out:
5369 return ret;
5370}
5371
4777static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, 5372static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
4778 bool skip_ref_clk) 5373 bool skip_ref_clk)
4779{ 5374{
@@ -5093,10 +5688,20 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
5093 (!check_for_bkops || (check_for_bkops && 5688 (!check_for_bkops || (check_for_bkops &&
5094 !hba->auto_bkops_enabled))) { 5689 !hba->auto_bkops_enabled))) {
5095 /* 5690 /*
 5691 * Make sure the link is in low power mode; we currently do this
 5692 * by putting the link in Hibern8. Another way to put the link in
 5693 * low power mode is to send a DME end point reset to the device
 5694 * and then a DME reset command to the local UniPro, but putting
 5695 * the link in Hibern8 is much faster.
5696 */
5697 ret = ufshcd_uic_hibern8_enter(hba);
5698 if (ret)
5699 goto out;
5700 /*
5096 * Change controller state to "reset state" which 5701 * Change controller state to "reset state" which
5097 * should also put the link in off/reset state 5702 * should also put the link in off/reset state
5098 */ 5703 */
5099 ufshcd_hba_stop(hba); 5704 ufshcd_hba_stop(hba, true);
5100 /* 5705 /*
5101 * TODO: Check if we need any delay to make sure that 5706 * TODO: Check if we need any delay to make sure that
5102 * controller is reset 5707 * controller is reset
@@ -5111,6 +5716,16 @@ out:
5111static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) 5716static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
5112{ 5717{
5113 /* 5718 /*
5719 * It seems some UFS devices may keep drawing more than sleep current
 5720 * (at least for 500us) from the UFS rails (especially from the VCCQ rail).
 5721 * To avoid this situation, add a 2ms delay before putting these UFS
5722 * rails in LPM mode.
5723 */
5724 if (!ufshcd_is_link_active(hba) &&
5725 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
5726 usleep_range(2000, 2100);
5727
5728 /*
5114 * If UFS device is either in UFS_Sleep turn off VCC rail to save some 5729 * If UFS device is either in UFS_Sleep turn off VCC rail to save some
5115 * power. 5730 * power.
5116 * 5731 *
@@ -5572,7 +6187,7 @@ void ufshcd_remove(struct ufs_hba *hba)
5572 scsi_remove_host(hba->host); 6187 scsi_remove_host(hba->host);
5573 /* disable interrupts */ 6188 /* disable interrupts */
5574 ufshcd_disable_intr(hba, hba->intr_mask); 6189 ufshcd_disable_intr(hba, hba->intr_mask);
5575 ufshcd_hba_stop(hba); 6190 ufshcd_hba_stop(hba, true);
5576 6191
5577 scsi_host_put(hba->host); 6192 scsi_host_put(hba->host);
5578 6193
@@ -5836,6 +6451,21 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
5836 init_waitqueue_head(&hba->dev_cmd.tag_wq); 6451 init_waitqueue_head(&hba->dev_cmd.tag_wq);
5837 6452
5838 ufshcd_init_clk_gating(hba); 6453 ufshcd_init_clk_gating(hba);
6454
6455 /*
6456 * In order to avoid any spurious interrupt immediately after
 6457 * registering the UFS controller interrupt handler, clear any pending UFS
6458 * interrupt status and disable all the UFS interrupts.
6459 */
6460 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
6461 REG_INTERRUPT_STATUS);
6462 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
6463 /*
6464 * Make sure that UFS interrupts are disabled and any pending interrupt
 6465 * status is cleared before registering the UFS interrupt handler.
6466 */
6467 mb();
6468
5839 /* IRQ registration */ 6469 /* IRQ registration */
5840 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba); 6470 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
5841 if (err) { 6471 if (err) {
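
The UFSHCI interrupt-status register is write-1-to-clear, so writing back
its current contents acks every pending bit; zeroing IE then masks all
sources, and the mb() orders both MMIO writes before the handler can fire.
A userspace model of the sequence (the register offsets follow the UFSHCI
layout but should be treated as illustrative here):

#include <stdint.h>

/* Fake register file standing in for the UFSHCI MMIO window. */
static volatile uint32_t regs[0x100 / 4];

#define REG_INTERRUPT_STATUS (0x20 / 4)
#define REG_INTERRUPT_ENABLE (0x24 / 4)

static void quiesce_interrupts(void)
{
    /* IS is write-1-to-clear: writing back the current status acks
     * every pending bit without touching anything else. */
    regs[REG_INTERRUPT_STATUS] = regs[REG_INTERRUPT_STATUS];

    /* Mask all interrupt sources before the handler is registered. */
    regs[REG_INTERRUPT_ENABLE] = 0;

    /* The driver issues mb() here so both writes reach the device
     * before devm_request_irq() can deliver an interrupt. */
    __sync_synchronize();
}

int main(void)
{
    quiesce_interrupts();
    return 0;
}
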
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index e3931d0c94eb..4bb65669f052 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -54,6 +54,7 @@
54#include <linux/clk.h> 54#include <linux/clk.h>
55#include <linux/completion.h> 55#include <linux/completion.h>
56#include <linux/regulator/consumer.h> 56#include <linux/regulator/consumer.h>
57#include "unipro.h"
57 58
58#include <asm/irq.h> 59#include <asm/irq.h>
59#include <asm/byteorder.h> 60#include <asm/byteorder.h>
@@ -383,6 +384,9 @@ struct ufs_init_prefetch {
383 * @clk_list_head: UFS host controller clocks list node head 384 * @clk_list_head: UFS host controller clocks list node head
384 * @pwr_info: holds current power mode 385 * @pwr_info: holds current power mode
385 * @max_pwr_info: keeps the device max valid pwm 386 * @max_pwr_info: keeps the device max valid pwm
387 * @urgent_bkops_lvl: keeps track of urgent bkops level for device
 388 * @is_urgent_bkops_lvl_checked: keeps track of whether the urgent bkops
 389 * level for the device is known or not.
386 */ 390 */
387struct ufs_hba { 391struct ufs_hba {
388 void __iomem *mmio_base; 392 void __iomem *mmio_base;
@@ -470,6 +474,9 @@ struct ufs_hba {
470 474
471 unsigned int quirks; /* Deviations from standard UFSHCI spec. */ 475 unsigned int quirks; /* Deviations from standard UFSHCI spec. */
472 476
477 /* Device deviations from standard UFS device spec. */
478 unsigned int dev_quirks;
479
473 wait_queue_head_t tm_wq; 480 wait_queue_head_t tm_wq;
474 wait_queue_head_t tm_tag_wq; 481 wait_queue_head_t tm_tag_wq;
475 unsigned long tm_condition; 482 unsigned long tm_condition;
@@ -509,6 +516,8 @@ struct ufs_hba {
509 516
510 bool wlun_dev_clr_ua; 517 bool wlun_dev_clr_ua;
511 518
519 /* Number of lanes available (1 or 2) for Rx/Tx */
520 u32 lanes_per_direction;
512 struct ufs_pa_layer_attr pwr_info; 521 struct ufs_pa_layer_attr pwr_info;
513 struct ufs_pwr_mode_info max_pwr_info; 522 struct ufs_pwr_mode_info max_pwr_info;
514 523
@@ -533,6 +542,9 @@ struct ufs_hba {
533 struct devfreq *devfreq; 542 struct devfreq *devfreq;
534 struct ufs_clk_scaling clk_scaling; 543 struct ufs_clk_scaling clk_scaling;
535 bool is_sys_suspended; 544 bool is_sys_suspended;
545
546 enum bkops_status urgent_bkops_lvl;
547 bool is_urgent_bkops_lvl_checked;
536}; 548};
537 549
538/* Returns true if clocks can be gated. Otherwise false */ 550/* Returns true if clocks can be gated. Otherwise false */
@@ -588,15 +600,9 @@ int ufshcd_alloc_host(struct device *, struct ufs_hba **);
588void ufshcd_dealloc_host(struct ufs_hba *); 600void ufshcd_dealloc_host(struct ufs_hba *);
589int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int); 601int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
590void ufshcd_remove(struct ufs_hba *); 602void ufshcd_remove(struct ufs_hba *);
591 603int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
592/** 604 u32 val, unsigned long interval_us,
593 * ufshcd_hba_stop - Send controller to reset state 605 unsigned long timeout_ms, bool can_sleep);
594 * @hba: per adapter instance
595 */
596static inline void ufshcd_hba_stop(struct ufs_hba *hba)
597{
598 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
599}
600 606
601static inline void check_upiu_size(void) 607static inline void check_upiu_size(void)
602{ 608{
@@ -682,11 +688,27 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
682 return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER); 688 return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
683} 689}
684 690
691int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size);
692
693static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
694{
695 return (pwr_info->pwr_rx == FAST_MODE ||
696 pwr_info->pwr_rx == FASTAUTO_MODE) &&
697 (pwr_info->pwr_tx == FAST_MODE ||
698 pwr_info->pwr_tx == FASTAUTO_MODE);
699}
700
701#define ASCII_STD true
702
703int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
704 u32 size, bool ascii);
705
685/* Expose Query-Request API */ 706/* Expose Query-Request API */
686int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, 707int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
687 enum flag_idn idn, bool *flag_res); 708 enum flag_idn idn, bool *flag_res);
688int ufshcd_hold(struct ufs_hba *hba, bool async); 709int ufshcd_hold(struct ufs_hba *hba, bool async);
689void ufshcd_release(struct ufs_hba *hba); 710void ufshcd_release(struct ufs_hba *hba);
711u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
690 712
691/* Wrapper functions for safely calling variant operations */ 713/* Wrapper functions for safely calling variant operations */
692static inline const char *ufshcd_get_var_name(struct ufs_hba *hba) 714static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 0ae0967aaed8..4cb1cc63f1a1 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -92,6 +92,7 @@ enum {
92 UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */ 92 UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */
93 UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */ 93 UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */
94 UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */ 94 UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */
95 UFSHCI_VERSION_21 = 0x00000210, /* 2.1 */
95}; 96};
96 97
97/* 98/*
@@ -170,6 +171,8 @@ enum {
170#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31) 171#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31)
171#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF 172#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
172#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000 173#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
174#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001
175#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
173 176
174/* UECN - Host UIC Error Code Network Layer 40h */ 177/* UECN - Host UIC Error Code Network Layer 40h */
175#define UIC_NETWORK_LAYER_ERROR UFS_BIT(31) 178#define UIC_NETWORK_LAYER_ERROR UFS_BIT(31)
@@ -209,6 +212,7 @@ enum {
209 212
210/* GenSelectorIndex calculation macros for M-PHY attributes */ 213/* GenSelectorIndex calculation macros for M-PHY attributes */
211#define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane) 214#define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane)
215#define UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane) (PA_MAXDATALANES + (lane))
212 216
213#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\ 217#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\
214 ((sel) & 0xFFFF)) 218 ((sel) & 0xFFFF))
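
The new RX selector macro offsets lanes by PA_MAXDATALANES so that TX lane n
and RX lane n occupy distinct GenSelectorIndex values in the low half-word
of the MIB selector. A quick expansion check, using the macros as defined in
this hunk and the RX_MIN_ACTIVATETIME_CAPABILITY attribute from unipro.h:

#include <stdio.h>

#define PA_MAXDATALANES 4

#define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane)
#define UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane) (PA_MAXDATALANES + (lane))

#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) | \
                                    ((sel) & 0xFFFF))

int main(void)
{
    /* RX_MIN_ACTIVATETIME_CAPABILITY (0x008F) on RX lane 0: the
     * attribute fills the high half-word, selector 4 the low one. */
    printf("0x%08x\n",
           UIC_ARG_MIB_SEL(0x008F, UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)));
    /* prints 0x008f0004 */
    return 0;
}
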
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index 816a8a46efb8..e2854e45f8d3 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -15,6 +15,7 @@
15/* 15/*
16 * M-TX Configuration Attributes 16 * M-TX Configuration Attributes
17 */ 17 */
18#define TX_HIBERN8TIME_CAPABILITY 0x000F
18#define TX_MODE 0x0021 19#define TX_MODE 0x0021
19#define TX_HSRATE_SERIES 0x0022 20#define TX_HSRATE_SERIES 0x0022
20#define TX_HSGEAR 0x0023 21#define TX_HSGEAR 0x0023
@@ -48,8 +49,12 @@
48#define RX_ENTER_HIBERN8 0x00A7 49#define RX_ENTER_HIBERN8 0x00A7
49#define RX_BYPASS_8B10B_ENABLE 0x00A8 50#define RX_BYPASS_8B10B_ENABLE 0x00A8
50#define RX_TERMINATION_FORCE_ENABLE 0x0089 51#define RX_TERMINATION_FORCE_ENABLE 0x0089
52#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
53#define RX_HIBERN8TIME_CAPABILITY 0x0092
51 54
52#define is_mphy_tx_attr(attr) (attr < RX_MODE) 55#define is_mphy_tx_attr(attr) (attr < RX_MODE)
56#define RX_MIN_ACTIVATETIME_UNIT_US 100
57#define HIBERN8TIME_UNIT_US 100
53/* 58/*
54 * PHY Adapter attributes 59
55 */ 60 */
@@ -70,6 +75,7 @@
70#define PA_MAXRXSPEEDFAST 0x1541 75#define PA_MAXRXSPEEDFAST 0x1541
71#define PA_MAXRXSPEEDSLOW 0x1542 76#define PA_MAXRXSPEEDSLOW 0x1542
72#define PA_TXLINKSTARTUPHS 0x1544 77#define PA_TXLINKSTARTUPHS 0x1544
78#define PA_LOCAL_TX_LCC_ENABLE 0x155E
73#define PA_TXSPEEDFAST 0x1565 79#define PA_TXSPEEDFAST 0x1565
74#define PA_TXSPEEDSLOW 0x1566 80#define PA_TXSPEEDSLOW 0x1566
75#define PA_REMOTEVERINFO 0x15A0 81#define PA_REMOTEVERINFO 0x15A0
@@ -110,6 +116,12 @@
110#define PA_STALLNOCONFIGTIME 0x15A3 116#define PA_STALLNOCONFIGTIME 0x15A3
111#define PA_SAVECONFIGTIME 0x15A4 117#define PA_SAVECONFIGTIME 0x15A4
112 118
119#define PA_TACTIVATE_TIME_UNIT_US 10
120#define PA_HIBERN8_TIME_UNIT_US 100
121
122/* PHY Adapter Protocol Constants */
123#define PA_MAXDATALANES 4
124
113/* PA power modes */ 125/* PA power modes */
114enum { 126enum {
115 FAST_MODE = 1, 127 FAST_MODE = 1,
@@ -143,6 +155,16 @@ enum ufs_hs_gear_tag {
143 UFS_HS_G3, /* HS Gear 3 */ 155 UFS_HS_G3, /* HS Gear 3 */
144}; 156};
145 157
158enum ufs_unipro_ver {
159 UFS_UNIPRO_VER_RESERVED = 0,
160 UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */
161 UFS_UNIPRO_VER_1_41 = 2, /* UniPro version 1.41 */
162 UFS_UNIPRO_VER_1_6 = 3, /* UniPro version 1.6 */
163 UFS_UNIPRO_VER_MAX = 4, /* UniPro unsupported version */
164 /* UniPro version field mask in PA_LOCALVERINFO */
165 UFS_UNIPRO_VER_MASK = 0xF,
166};
167
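
PA_LOCALVERINFO carries the UniPro version in its low nibble, which is what
UFS_UNIPRO_VER_MASK extracts; the enumerators above then make comparisons
against 1.6 readable. A small sketch of the extraction (treating
out-of-range values as reserved is an assumption here):

#include <stdint.h>

#define UFS_UNIPRO_VER_MASK 0xF
#define UFS_UNIPRO_VER_MAX  4

/* Pull the version field out of a raw PA_LOCALVERINFO read; values at
 * or above UFS_UNIPRO_VER_MAX are treated as reserved (an assumption). */
static unsigned int unipro_ver(uint32_t pa_localverinfo)
{
    unsigned int ver = pa_localverinfo & UFS_UNIPRO_VER_MASK;

    return ver < UFS_UNIPRO_VER_MAX ? ver : 0;
}
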
146/* 168/*
147 * Data Link Layer Attributes 169 * Data Link Layer Attributes
148 */ 170 */