Diffstat (limited to 'drivers/phy/rockchip/phy-rockchip-pcie.c')
 -rw-r--r--  drivers/phy/rockchip/phy-rockchip-pcie.c | 131
1 file changed, 117 insertions, 14 deletions
diff --git a/drivers/phy/rockchip/phy-rockchip-pcie.c b/drivers/phy/rockchip/phy-rockchip-pcie.c
index 6904633cad68..7cbdde029c0a 100644
--- a/drivers/phy/rockchip/phy-rockchip-pcie.c
+++ b/drivers/phy/rockchip/phy-rockchip-pcie.c
@@ -73,10 +73,38 @@ struct rockchip_pcie_data {
 struct rockchip_pcie_phy {
 	struct rockchip_pcie_data *phy_data;
 	struct regmap *reg_base;
+	struct phy_pcie_instance {
+		struct phy *phy;
+		u32 index;
+	} phys[PHY_MAX_LANE_NUM];
+	struct mutex pcie_mutex;
 	struct reset_control *phy_rst;
 	struct clk *clk_pciephy_ref;
+	int pwr_cnt;
+	int init_cnt;
 };
 
+static struct rockchip_pcie_phy *to_pcie_phy(struct phy_pcie_instance *inst)
+{
+	return container_of(inst, struct rockchip_pcie_phy,
+			    phys[inst->index]);
+}
+
+static struct phy *rockchip_pcie_phy_of_xlate(struct device *dev,
+					      struct of_phandle_args *args)
+{
+	struct rockchip_pcie_phy *rk_phy = dev_get_drvdata(dev);
+
+	if (args->args_count == 0)
+		return rk_phy->phys[0].phy;
+
+	if (WARN_ON(args->args[0] >= PHY_MAX_LANE_NUM))
+		return ERR_PTR(-ENODEV);
+
+	return rk_phy->phys[args->args[0]].phy;
+}
+
+
 static inline void phy_wr_cfg(struct rockchip_pcie_phy *rk_phy,
 			      u32 addr, u32 data)
 {
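The per-lane bookkeeping above embeds each struct phy_pcie_instance inside the shared rockchip_pcie_phy, so to_pcie_phy() can walk back from an instance to its parent with container_of(), while rockchip_pcie_phy_of_xlate() returns the instance named by the phandle argument (falling back to lane 0 for zero-cell consumers). A minimal user-space sketch of the container_of() step follows; the demo_* names are hypothetical, and the variable array index inside offsetof() relies on __builtin_offsetof(), the same GCC/Clang behaviour the kernel code depends on.

	/*
	 * Minimal sketch of the to_pcie_phy() lookup; demo_* names are
	 * hypothetical and not part of the driver.
	 */
	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	#define DEMO_MAX_LANE_NUM 4

	struct demo_instance {
		int index;
	};

	struct demo_phy {
		struct demo_instance phys[DEMO_MAX_LANE_NUM];
	};

	/* The embedded element knows its own slot, so one subtraction from
	 * its address recovers the enclosing demo_phy. */
	static struct demo_phy *to_demo_phy(struct demo_instance *inst)
	{
		return container_of(inst, struct demo_phy, phys[inst->index]);
	}

	int main(void)
	{
		struct demo_phy phy = { .phys = { { 0 }, { 1 }, { 2 }, { 3 } } };

		printf("parent recovered: %s\n",
		       to_demo_phy(&phy.phys[2]) == &phy ? "yes" : "no");
		return 0;
	}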
@@ -116,29 +144,59 @@ static inline u32 phy_rd_cfg(struct rockchip_pcie_phy *rk_phy,
 
 static int rockchip_pcie_phy_power_off(struct phy *phy)
 {
-	struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy);
+	struct phy_pcie_instance *inst = phy_get_drvdata(phy);
+	struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
 	int err = 0;
 
+	mutex_lock(&rk_phy->pcie_mutex);
+
+	regmap_write(rk_phy->reg_base,
+		     rk_phy->phy_data->pcie_laneoff,
+		     HIWORD_UPDATE(PHY_LANE_IDLE_OFF,
+				   PHY_LANE_IDLE_MASK,
+				   PHY_LANE_IDLE_A_SHIFT + inst->index));
+
+	if (--rk_phy->pwr_cnt)
+		goto err_out;
+
 	err = reset_control_assert(rk_phy->phy_rst);
 	if (err) {
 		dev_err(&phy->dev, "assert phy_rst err %d\n", err);
-		return err;
+		goto err_restore;
 	}
 
+err_out:
+	mutex_unlock(&rk_phy->pcie_mutex);
 	return 0;
+
+err_restore:
+	rk_phy->pwr_cnt++;
+	regmap_write(rk_phy->reg_base,
+		     rk_phy->phy_data->pcie_laneoff,
+		     HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
+				   PHY_LANE_IDLE_MASK,
+				   PHY_LANE_IDLE_A_SHIFT + inst->index));
+	mutex_unlock(&rk_phy->pcie_mutex);
+	return err;
 }
 
 static int rockchip_pcie_phy_power_on(struct phy *phy)
 {
-	struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy);
+	struct phy_pcie_instance *inst = phy_get_drvdata(phy);
+	struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
 	int err = 0;
 	u32 status;
 	unsigned long timeout;
 
+	mutex_lock(&rk_phy->pcie_mutex);
+
+	if (rk_phy->pwr_cnt++)
+		goto err_out;
+
 	err = reset_control_deassert(rk_phy->phy_rst);
 	if (err) {
 		dev_err(&phy->dev, "deassert phy_rst err %d\n", err);
-		return err;
+		goto err_pwr_cnt;
 	}
 
 	regmap_write(rk_phy->reg_base, rk_phy->phy_data->pcie_conf,
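Because all lanes share one reset line, power_on()/power_off() now keep a pwr_cnt under pcie_mutex: only the 0 -> 1 transition deasserts phy_rst and only the 1 -> 0 transition asserts it again, while every call still flips its own lane-idle bit. A small user-space sketch of that counting pattern, with hypothetical demo_* names:

	/* Sketch of the shared-resource refcount used by power_on()/power_off(). */
	#include <pthread.h>
	#include <stdio.h>

	struct demo_shared {
		pthread_mutex_t lock;
		int pwr_cnt;		/* how many lanes asked for power */
	};

	static void demo_assert_reset(void)   { puts("reset asserted");   }
	static void demo_deassert_reset(void) { puts("reset deasserted"); }

	/* Only the first caller actually releases the reset ... */
	static void demo_power_on(struct demo_shared *s)
	{
		pthread_mutex_lock(&s->lock);
		if (!s->pwr_cnt++)
			demo_deassert_reset();
		pthread_mutex_unlock(&s->lock);
	}

	/* ... and only the last caller re-asserts it. */
	static void demo_power_off(struct demo_shared *s)
	{
		pthread_mutex_lock(&s->lock);
		if (!--s->pwr_cnt)
			demo_assert_reset();
		pthread_mutex_unlock(&s->lock);
	}

	int main(void)
	{
		struct demo_shared s = { PTHREAD_MUTEX_INITIALIZER, 0 };

		demo_power_on(&s);	/* lane 0: deasserts the reset */
		demo_power_on(&s);	/* lane 1: counter only        */
		demo_power_off(&s);	/* lane 1: counter only        */
		demo_power_off(&s);	/* lane 0: asserts the reset   */
		return 0;
	}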
@@ -146,6 +204,12 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
 			     PHY_CFG_ADDR_MASK,
 			     PHY_CFG_ADDR_SHIFT));
 
+	regmap_write(rk_phy->reg_base,
+		     rk_phy->phy_data->pcie_laneoff,
+		     HIWORD_UPDATE(!PHY_LANE_IDLE_OFF,
+				   PHY_LANE_IDLE_MASK,
+				   PHY_LANE_IDLE_A_SHIFT + inst->index));
+
 	/*
 	 * No documented timeout value for phy operation below,
 	 * so we make it large enough here. And we use loop-break
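HIWORD_UPDATE() targets a Rockchip GRF register whose upper 16 bits act as a write-enable mask, so one write can set or clear just the lane-idle bit selected by PHY_LANE_IDLE_A_SHIFT + inst->index; the !PHY_LANE_IDLE_OFF form writes 0 through the same mask to take the lane out of idle. The macro body is not part of this hunk, so the sketch below assumes the usual Rockchip definition and uses illustrative DEMO_* values rather than the driver's real constants.

	/* How a hi-word-masked GRF write selects a single lane bit. */
	#include <stdint.h>
	#include <stdio.h>

	/* Assumed from the common Rockchip convention, not quoted from this hunk. */
	#define HIWORD_UPDATE(val, mask, shift) \
		(((val) << (shift)) | ((mask) << ((shift) + 16)))

	#define DEMO_LANE_IDLE_OFF	0x1
	#define DEMO_LANE_IDLE_MASK	0x1
	#define DEMO_LANE_IDLE_A_SHIFT	3

	int main(void)
	{
		int lane = 2;	/* stands in for inst->index */
		uint32_t v = HIWORD_UPDATE(DEMO_LANE_IDLE_OFF, DEMO_LANE_IDLE_MASK,
					   DEMO_LANE_IDLE_A_SHIFT + lane);

		/*
		 * The low half carries the new bit value, the high half the
		 * write-enable mask, so the GRF latches only bit (3 + lane)
		 * and leaves the other lanes' idle bits untouched.
		 */
		printf("write 0x%08x (bit %d set, bit %d write-enabled)\n",
		       (unsigned int)v, DEMO_LANE_IDLE_A_SHIFT + lane,
		       DEMO_LANE_IDLE_A_SHIFT + lane + 16);
		return 0;
	}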
@@ -214,18 +278,29 @@ static int rockchip_pcie_phy_power_on(struct phy *phy)
 		goto err_pll_lock;
 	}
 
+err_out:
+	mutex_unlock(&rk_phy->pcie_mutex);
 	return 0;
 
 err_pll_lock:
 	reset_control_assert(rk_phy->phy_rst);
+err_pwr_cnt:
+	rk_phy->pwr_cnt--;
+	mutex_unlock(&rk_phy->pcie_mutex);
 	return err;
 }
 
 static int rockchip_pcie_phy_init(struct phy *phy)
 {
-	struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy);
+	struct phy_pcie_instance *inst = phy_get_drvdata(phy);
+	struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
 	int err = 0;
 
+	mutex_lock(&rk_phy->pcie_mutex);
+
+	if (rk_phy->init_cnt++)
+		goto err_out;
+
 	err = clk_prepare_enable(rk_phy->clk_pciephy_ref);
 	if (err) {
 		dev_err(&phy->dev, "Fail to enable pcie ref clock.\n");
@@ -238,20 +313,33 @@ static int rockchip_pcie_phy_init(struct phy *phy)
 		goto err_reset;
 	}
 
-	return err;
+err_out:
+	mutex_unlock(&rk_phy->pcie_mutex);
+	return 0;
 
 err_reset:
+
 	clk_disable_unprepare(rk_phy->clk_pciephy_ref);
 err_refclk:
+	rk_phy->init_cnt--;
+	mutex_unlock(&rk_phy->pcie_mutex);
 	return err;
 }
 
 static int rockchip_pcie_phy_exit(struct phy *phy)
 {
-	struct rockchip_pcie_phy *rk_phy = phy_get_drvdata(phy);
+	struct phy_pcie_instance *inst = phy_get_drvdata(phy);
+	struct rockchip_pcie_phy *rk_phy = to_pcie_phy(inst);
+
+	mutex_lock(&rk_phy->pcie_mutex);
+
+	if (--rk_phy->init_cnt)
+		goto err_init_cnt;
 
 	clk_disable_unprepare(rk_phy->clk_pciephy_ref);
 
+err_init_cnt:
+	mutex_unlock(&rk_phy->pcie_mutex);
 	return 0;
 }
 
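init()/exit() get the same treatment through init_cnt: the reference clock is enabled on the first phy_init() and released on the last phy_exit(). From the consumer side each lane is a separate PHY, but the shared counters mean the clock and reset are still toggled only once. The fragment below is a hedged, driver-context sketch of such a consumer (not the actual pcie-rockchip host code); DEMO_LANES and demo_bring_up_lanes() are invented names, and error unwinding (phy_power_off()/phy_exit() on failure) is omitted for brevity.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/of.h>
	#include <linux/phy/phy.h>

	#define DEMO_LANES 4	/* illustrative; matches PHY_MAX_LANE_NUM here */

	static int demo_bring_up_lanes(struct device *dev, struct phy **phys)
	{
		int i, err;

		for (i = 0; i < DEMO_LANES; i++) {
			/* "phys = <&pcie_phy 0>, <&pcie_phy 1>, ..." in the DT */
			phys[i] = devm_of_phy_get_by_index(dev, dev->of_node, i);
			if (IS_ERR(phys[i]))
				return PTR_ERR(phys[i]);

			/*
			 * The first lane enables the ref clock (init_cnt 0 -> 1)
			 * and deasserts the reset (pwr_cnt 0 -> 1); the others
			 * only bump the counters and clear their own idle bit.
			 */
			err = phy_init(phys[i]);
			if (err)
				return err;
			err = phy_power_on(phys[i]);
			if (err)
				return err;
		}

		return 0;
	}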
@@ -283,10 +371,11 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct rockchip_pcie_phy *rk_phy;
-	struct phy *generic_phy;
 	struct phy_provider *phy_provider;
 	struct regmap *grf;
 	const struct of_device_id *of_id;
+	int i;
+	u32 phy_num;
 
 	grf = syscon_node_to_regmap(dev->parent->of_node);
 	if (IS_ERR(grf)) {
@@ -305,6 +394,8 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev)
 	rk_phy->phy_data = (struct rockchip_pcie_data *)of_id->data;
 	rk_phy->reg_base = grf;
 
+	mutex_init(&rk_phy->pcie_mutex);
+
 	rk_phy->phy_rst = devm_reset_control_get(dev, "phy");
 	if (IS_ERR(rk_phy->phy_rst)) {
 		if (PTR_ERR(rk_phy->phy_rst) != -EPROBE_DEFER)
@@ -319,14 +410,26 @@ static int rockchip_pcie_phy_probe(struct platform_device *pdev)
 		return PTR_ERR(rk_phy->clk_pciephy_ref);
 	}
 
-	generic_phy = devm_phy_create(dev, dev->of_node, &ops);
-	if (IS_ERR(generic_phy)) {
-		dev_err(dev, "failed to create PHY\n");
-		return PTR_ERR(generic_phy);
+	/* parse #phy-cells to see if it's legacy PHY model */
+	if (of_property_read_u32(dev->of_node, "#phy-cells", &phy_num))
+		return -ENOENT;
+
+	phy_num = (phy_num == 0) ? 1 : PHY_MAX_LANE_NUM;
+	dev_dbg(dev, "phy number is %d\n", phy_num);
+
+	for (i = 0; i < phy_num; i++) {
+		rk_phy->phys[i].phy = devm_phy_create(dev, dev->of_node, &ops);
+		if (IS_ERR(rk_phy->phys[i].phy)) {
+			dev_err(dev, "failed to create PHY%d\n", i);
+			return PTR_ERR(rk_phy->phys[i].phy);
+		}
+		rk_phy->phys[i].index = i;
+		phy_set_drvdata(rk_phy->phys[i].phy, &rk_phy->phys[i]);
 	}
 
-	phy_set_drvdata(generic_phy, rk_phy);
-	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+	platform_set_drvdata(pdev, rk_phy);
+	phy_provider = devm_of_phy_provider_register(dev,
+						     rockchip_pcie_phy_of_xlate);
 
 	return PTR_ERR_OR_ZERO(phy_provider);
 }
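Registering rockchip_pcie_phy_of_xlate instead of of_phy_simple_xlate is what makes both bindings work: a consumer with #phy-cells = <0> gets lane 0 (the legacy behaviour), while #phy-cells = <1> lets it address each lane by number, and probe creates one or PHY_MAX_LANE_NUM PHYs accordingly. A standalone model of that dispatch, with hypothetical demo_* names:

	/* User-space model of the of_xlate dispatch added above. */
	#include <stdio.h>

	#define DEMO_MAX_LANE_NUM 4

	struct demo_phy { int lane; };

	static struct demo_phy demo_phys[DEMO_MAX_LANE_NUM] = {
		{ 0 }, { 1 }, { 2 }, { 3 }
	};

	static struct demo_phy *demo_xlate(int args_count, int arg0)
	{
		if (args_count == 0)		/* legacy: phys = <&pcie_phy>;   */
			return &demo_phys[0];
		if (arg0 >= DEMO_MAX_LANE_NUM)	/* bad lane number in the DT     */
			return NULL;
		return &demo_phys[arg0];	/* per-lane: phys = <&pcie_phy N>; */
	}

	int main(void)
	{
		printf("legacy -> lane %d\n", demo_xlate(0, 0)->lane);
		printf("lane 2 -> lane %d\n", demo_xlate(1, 2)->lane);
		printf("lane 9 -> %s\n", demo_xlate(1, 9) ? "ok" : "rejected");
		return 0;
	}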
