about summary refs log tree commit diff stats
path: root/drivers/scsi
diff options
context:
space:
mode:
authorDolev Raviv <draviv@codeaurora.org>2014-09-25 08:32:31 -0400
committerChristoph Hellwig <hch@lst.de>2014-10-01 07:11:23 -0400
commit7eb584db73bebbc9852a14341431ed6935419bec (patch)
treecfd9dd104806fd4a91efdc21b2bf625b831d1e61 /drivers/scsi
parent57d104c153d3d6d7bea60089e80f37501851ed2c (diff)
ufs: refactor configuring power mode
Sometimes, the device reports its maximum power and speed capabilities, but we might not wish to configure it to use those maximum capabilities. This change adds support for the vendor-specific host driver to implement a power change notify callback. To enable configuring different power modes (number of lanes, gear number, and fast/slow modes), it is necessary to split the configuration stage from the stage that reads the device's max power mode. In addition, it is not required to read the configuration more than once, so the configuration is stored after it is read the first time.

Signed-off-by: Dolev Raviv <draviv@codeaurora.org>
Signed-off-by: Yaniv Gardi <ygardi@codeaurora.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'drivers/scsi')
-rw-r--r--drivers/scsi/ufs/ufshcd.c166
-rw-r--r--drivers/scsi/ufs/ufshcd.h27
2 files changed, 160 insertions, 33 deletions
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index f2b50bc13ac9..8bbb37d7db41 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -179,6 +179,8 @@ static void ufshcd_hba_exit(struct ufs_hba *hba);
179static int ufshcd_probe_hba(struct ufs_hba *hba); 179static int ufshcd_probe_hba(struct ufs_hba *hba);
180static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); 180static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
181static irqreturn_t ufshcd_intr(int irq, void *__hba); 181static irqreturn_t ufshcd_intr(int irq, void *__hba);
182static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
183 struct ufs_pa_layer_attr *desired_pwr_mode);
182 184
183static inline int ufshcd_enable_irq(struct ufs_hba *hba) 185static inline int ufshcd_enable_irq(struct ufs_hba *hba)
184{ 186{
@@ -1958,40 +1960,83 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
1958} 1960}
1959 1961
1960/** 1962/**
1961 * ufshcd_config_max_pwr_mode - Set & Change power mode with 1963 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
1962 * maximum capability attribute information. 1964 * @hba: per-adapter instance
1963 * @hba: per adapter instance
1964 *
1965 * Returns 0 on success, non-zero value on failure
1966 */ 1965 */
1967static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba) 1966static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
1968{ 1967{
1969 enum {RX = 0, TX = 1}; 1968 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
1970 u32 lanes[] = {1, 1}; 1969
1971 u32 gear[] = {1, 1}; 1970 if (hba->max_pwr_info.is_valid)
1972 u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE}; 1971 return 0;
1973 int ret; 1972
1973 pwr_info->pwr_tx = FASTAUTO_MODE;
1974 pwr_info->pwr_rx = FASTAUTO_MODE;
1975 pwr_info->hs_rate = PA_HS_MODE_B;
1974 1976
1975 /* Get the connected lane count */ 1977 /* Get the connected lane count */
1976 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]); 1978 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
1977 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]); 1979 &pwr_info->lane_rx);
1980 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
1981 &pwr_info->lane_tx);
1982
1983 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
1984 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
1985 __func__,
1986 pwr_info->lane_rx,
1987 pwr_info->lane_tx);
1988 return -EINVAL;
1989 }
1978 1990
1979 /* 1991 /*
1980 * First, get the maximum gears of HS speed. 1992 * First, get the maximum gears of HS speed.
1981 * If a zero value, it means there is no HSGEAR capability. 1993 * If a zero value, it means there is no HSGEAR capability.
1982 * Then, get the maximum gears of PWM speed. 1994 * Then, get the maximum gears of PWM speed.
1983 */ 1995 */
1984 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]); 1996 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
1985 if (!gear[RX]) { 1997 if (!pwr_info->gear_rx) {
1986 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]); 1998 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
1987 pwr[RX] = SLOWAUTO_MODE; 1999 &pwr_info->gear_rx);
2000 if (!pwr_info->gear_rx) {
2001 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
2002 __func__, pwr_info->gear_rx);
2003 return -EINVAL;
2004 }
2005 pwr_info->pwr_rx = SLOWAUTO_MODE;
1988 } 2006 }
1989 2007
1990 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]); 2008 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
1991 if (!gear[TX]) { 2009 &pwr_info->gear_tx);
2010 if (!pwr_info->gear_tx) {
1992 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), 2011 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
1993 &gear[TX]); 2012 &pwr_info->gear_tx);
1994 pwr[TX] = SLOWAUTO_MODE; 2013 if (!pwr_info->gear_tx) {
2014 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
2015 __func__, pwr_info->gear_tx);
2016 return -EINVAL;
2017 }
2018 pwr_info->pwr_tx = SLOWAUTO_MODE;
2019 }
2020
2021 hba->max_pwr_info.is_valid = true;
2022 return 0;
2023}
2024
2025static int ufshcd_change_power_mode(struct ufs_hba *hba,
2026 struct ufs_pa_layer_attr *pwr_mode)
2027{
2028 int ret;
2029
2030 /* if already configured to the requested pwr_mode */
2031 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
2032 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
2033 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
2034 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
2035 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
2036 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
2037 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
2038 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
2039 return 0;
1995 } 2040 }
1996 2041
1997 /* 2042 /*
@@ -2000,23 +2045,67 @@ static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
2000 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION, 2045 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
2001 * - PA_HSSERIES 2046 * - PA_HSSERIES
2002 */ 2047 */
2003 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]); 2048 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
2004 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]); 2049 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
2005 if (pwr[RX] == FASTAUTO_MODE) 2050 pwr_mode->lane_rx);
2051 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
2052 pwr_mode->pwr_rx == FAST_MODE)
2006 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE); 2053 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
2054 else
2055 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
2007 2056
2008 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]); 2057 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
2009 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]); 2058 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
2010 if (pwr[TX] == FASTAUTO_MODE) 2059 pwr_mode->lane_tx);
2060 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
2061 pwr_mode->pwr_tx == FAST_MODE)
2011 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE); 2062 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
2063 else
2064 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
2012 2065
2013 if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE) 2066 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
2014 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B); 2067 pwr_mode->pwr_tx == FASTAUTO_MODE ||
2068 pwr_mode->pwr_rx == FAST_MODE ||
2069 pwr_mode->pwr_tx == FAST_MODE)
2070 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
2071 pwr_mode->hs_rate);
2015 2072
2016 ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]); 2073 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
2017 if (ret) 2074 | pwr_mode->pwr_tx);
2075
2076 if (ret) {
2018 dev_err(hba->dev, 2077 dev_err(hba->dev,
2019 "pwr_mode: power mode change failed %d\n", ret); 2078 "%s: power mode change failed %d\n", __func__, ret);
2079 } else {
2080 if (hba->vops && hba->vops->pwr_change_notify)
2081 hba->vops->pwr_change_notify(hba,
2082 POST_CHANGE, NULL, pwr_mode);
2083
2084 memcpy(&hba->pwr_info, pwr_mode,
2085 sizeof(struct ufs_pa_layer_attr));
2086 }
2087
2088 return ret;
2089}
2090
2091/**
2092 * ufshcd_config_pwr_mode - configure a new power mode
2093 * @hba: per-adapter instance
2094 * @desired_pwr_mode: desired power configuration
2095 */
2096static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
2097 struct ufs_pa_layer_attr *desired_pwr_mode)
2098{
2099 struct ufs_pa_layer_attr final_params = { 0 };
2100 int ret;
2101
2102 if (hba->vops && hba->vops->pwr_change_notify)
2103 hba->vops->pwr_change_notify(hba,
2104 PRE_CHANGE, desired_pwr_mode, &final_params);
2105 else
2106 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
2107
2108 ret = ufshcd_change_power_mode(hba, &final_params);
2020 2109
2021 return ret; 2110 return ret;
2022} 2111}
@@ -3757,7 +3846,16 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
3757 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; 3846 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
3758 hba->wlun_dev_clr_ua = true; 3847 hba->wlun_dev_clr_ua = true;
3759 3848
3760 ufshcd_config_max_pwr_mode(hba); 3849 if (ufshcd_get_max_pwr_mode(hba)) {
3850 dev_err(hba->dev,
3851 "%s: Failed getting max supported power mode\n",
3852 __func__);
3853 } else {
3854 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
3855 if (ret)
3856 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
3857 __func__, ret);
3858 }
3761 3859
3762 /* 3860 /*
3763 * If we are in error handling context or in power management callbacks 3861 * If we are in error handling context or in power management callbacks
@@ -4920,6 +5018,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
4920 host->unique_id = host->host_no; 5018 host->unique_id = host->host_no;
4921 host->max_cmd_len = MAX_CDB_SIZE; 5019 host->max_cmd_len = MAX_CDB_SIZE;
4922 5020
5021 hba->max_pwr_info.is_valid = false;
5022
4923 /* Initailize wait queue for task management */ 5023 /* Initailize wait queue for task management */
4924 init_waitqueue_head(&hba->tm_wq); 5024 init_waitqueue_head(&hba->tm_wq);
4925 init_waitqueue_head(&hba->tm_tag_wq); 5025 init_waitqueue_head(&hba->tm_tag_wq);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index e1bde0598d92..343b18a7a8b0 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -221,6 +221,22 @@ struct ufs_clk_info {
221 221
222#define PRE_CHANGE 0 222#define PRE_CHANGE 0
223#define POST_CHANGE 1 223#define POST_CHANGE 1
224
225struct ufs_pa_layer_attr {
226 u32 gear_rx;
227 u32 gear_tx;
228 u32 lane_rx;
229 u32 lane_tx;
230 u32 pwr_rx;
231 u32 pwr_tx;
232 u32 hs_rate;
233};
234
235struct ufs_pwr_mode_info {
236 bool is_valid;
237 struct ufs_pa_layer_attr info;
238};
239
224/** 240/**
225 * struct ufs_hba_variant_ops - variant specific callbacks 241 * struct ufs_hba_variant_ops - variant specific callbacks
226 * @name: variant name 242 * @name: variant name
@@ -232,6 +248,9 @@ struct ufs_clk_info {
232 * variant specific Uni-Pro initialization. 248 * variant specific Uni-Pro initialization.
233 * @link_startup_notify: called before and after Link startup is carried out 249 * @link_startup_notify: called before and after Link startup is carried out
234 * to allow variant specific Uni-Pro initialization. 250 * to allow variant specific Uni-Pro initialization.
251 * @pwr_change_notify: called before and after a power mode change
252 * is carried out to allow vendor spesific capabilities
253 * to be set.
235 * @suspend: called during host controller PM callback 254 * @suspend: called during host controller PM callback
236 * @resume: called during host controller PM callback 255 * @resume: called during host controller PM callback
237 */ 256 */
@@ -243,6 +262,9 @@ struct ufs_hba_variant_ops {
243 int (*setup_regulators)(struct ufs_hba *, bool); 262 int (*setup_regulators)(struct ufs_hba *, bool);
244 int (*hce_enable_notify)(struct ufs_hba *, bool); 263 int (*hce_enable_notify)(struct ufs_hba *, bool);
245 int (*link_startup_notify)(struct ufs_hba *, bool); 264 int (*link_startup_notify)(struct ufs_hba *, bool);
265 int (*pwr_change_notify)(struct ufs_hba *,
266 bool, struct ufs_pa_layer_attr *,
267 struct ufs_pa_layer_attr *);
246 int (*suspend)(struct ufs_hba *, enum ufs_pm_op); 268 int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
247 int (*resume)(struct ufs_hba *, enum ufs_pm_op); 269 int (*resume)(struct ufs_hba *, enum ufs_pm_op);
248}; 270};
@@ -302,6 +324,8 @@ struct ufs_init_prefetch {
302 * @auto_bkops_enabled: to track whether bkops is enabled in device 324 * @auto_bkops_enabled: to track whether bkops is enabled in device
303 * @vreg_info: UFS device voltage regulator information 325 * @vreg_info: UFS device voltage regulator information
304 * @clk_list_head: UFS host controller clocks list node head 326 * @clk_list_head: UFS host controller clocks list node head
327 * @pwr_info: holds current power mode
328 * @max_pwr_info: keeps the device max valid pwm
305 */ 329 */
306struct ufs_hba { 330struct ufs_hba {
307 void __iomem *mmio_base; 331 void __iomem *mmio_base;
@@ -387,6 +411,9 @@ struct ufs_hba {
387 struct list_head clk_list_head; 411 struct list_head clk_list_head;
388 412
389 bool wlun_dev_clr_ua; 413 bool wlun_dev_clr_ua;
414
415 struct ufs_pa_layer_attr pwr_info;
416 struct ufs_pwr_mode_info max_pwr_info;
390}; 417};
391 418
392#define ufshcd_writel(hba, val, reg) \ 419#define ufshcd_writel(hba, val, reg) \